[{"data":1,"prerenderedAt":1277},["ShallowReactive",2],{"/en-us/blog":3,"navigation-en-us":20,"banner-en-us":431,"footer-en-us":441,"blogCategories-en-us":683,"relatedBlogPosts-en-us":817,"mainFeaturedPost-en-us":1239,"recentFeaturedPosts-en-us":1244,"recentPosts-en-us":1262},{"id":4,"title":5,"body":6,"category":6,"config":7,"content":9,"description":6,"extension":11,"meta":12,"navigation":13,"path":14,"seo":15,"slug":6,"stem":18,"testContent":6,"type":6,"__hash__":19},"pages/en-us/blog/index.yml","",null,{"template":8},"BlogHome",{"title":10},"GitLab Blog","yml",{},true,"/en-us/blog",{"title":16,"description":17},"Blog | GitLab","Tutorials, product information, expert insights, and more from GitLab to help DevSecOps teams build, test, and deploy secure software faster.","en-us/blog/index","uG0QGFGbRgp8AvAwGLhaM8YlKMxyuUKZ2haDexExj9g",{"data":21},{"logo":22,"freeTrial":27,"sales":32,"login":37,"items":42,"search":351,"minimal":382,"duo":401,"switchNav":410,"pricingDeployment":421},{"config":23},{"href":24,"dataGaName":25,"dataGaLocation":26},"/","gitlab logo","header",{"text":28,"config":29},"Get free trial",{"href":30,"dataGaName":31,"dataGaLocation":26},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":33,"config":34},"Talk to sales",{"href":35,"dataGaName":36,"dataGaLocation":26},"/sales/","sales",{"text":38,"config":39},"Sign in",{"href":40,"dataGaName":41,"dataGaLocation":26},"https://gitlab.com/users/sign_in/","sign in",[43,70,165,170,272,332],{"text":44,"config":45,"cards":47},"Platform",{"dataNavLevelOne":46},"platform",[48,54,62],{"title":44,"description":49,"link":50},"The intelligent orchestration platform for DevSecOps",{"text":51,"config":52},"Explore our Platform",{"href":53,"dataGaName":46,"dataGaLocation":26},"/platform/",{"title":55,"description":56,"link":57},"GitLab Duo Agent Platform","Agentic AI for the entire software lifecycle",{"text":58,"config":59},"Meet GitLab Duo",{"href":60,"dataGaName":61,"dataGaLocation":26},"/gitlab-duo-agent-platform/","gitlab duo agent platform",{"title":63,"description":64,"link":65},"Why GitLab","See the top reasons enterprises choose GitLab",{"text":66,"config":67},"Learn more",{"href":68,"dataGaName":69,"dataGaLocation":26},"/why-gitlab/","why gitlab",{"text":71,"left":13,"config":72,"link":74,"lists":78,"footer":147},"Product",{"dataNavLevelOne":73},"solutions",{"text":75,"config":76},"View all Solutions",{"href":77,"dataGaName":73,"dataGaLocation":26},"/solutions/",[79,103,126],{"title":80,"description":81,"link":82,"items":87},"Automation","CI/CD and automation to accelerate deployment",{"config":83},{"icon":84,"href":85,"dataGaName":86,"dataGaLocation":26},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[88,92,95,99],{"text":89,"config":90},"CI/CD",{"href":91,"dataGaLocation":26,"dataGaName":89},"/solutions/continuous-integration/",{"text":55,"config":93},{"href":60,"dataGaLocation":26,"dataGaName":94},"gitlab duo agent platform - product menu",{"text":96,"config":97},"Source Code Management",{"href":98,"dataGaLocation":26,"dataGaName":96},"/solutions/source-code-management/",{"text":100,"config":101},"Automated Software Delivery",{"href":85,"dataGaLocation":26,"dataGaName":102},"Automated software delivery",{"title":104,"description":105,"link":106,"items":111},"Security","Deliver code faster without compromising 
security",{"config":107},{"href":108,"dataGaName":109,"dataGaLocation":26,"icon":110},"/solutions/application-security-testing/","security and compliance","ShieldCheckLight",[112,116,121],{"text":113,"config":114},"Application Security Testing",{"href":108,"dataGaName":115,"dataGaLocation":26},"Application security testing",{"text":117,"config":118},"Software Supply Chain Security",{"href":119,"dataGaLocation":26,"dataGaName":120},"/solutions/supply-chain/","Software supply chain security",{"text":122,"config":123},"Software Compliance",{"href":124,"dataGaName":125,"dataGaLocation":26},"/solutions/software-compliance/","software compliance",{"title":127,"link":128,"items":133},"Measurement",{"config":129},{"icon":130,"href":131,"dataGaName":132,"dataGaLocation":26},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[134,138,142],{"text":135,"config":136},"Visibility & Measurement",{"href":131,"dataGaLocation":26,"dataGaName":137},"Visibility and Measurement",{"text":139,"config":140},"Value Stream Management",{"href":141,"dataGaLocation":26,"dataGaName":139},"/solutions/value-stream-management/",{"text":143,"config":144},"Analytics & Insights",{"href":145,"dataGaLocation":26,"dataGaName":146},"/solutions/analytics-and-insights/","Analytics and insights",{"title":148,"items":149},"GitLab for",[150,155,160],{"text":151,"config":152},"Enterprise",{"href":153,"dataGaLocation":26,"dataGaName":154},"/enterprise/","enterprise",{"text":156,"config":157},"Small Business",{"href":158,"dataGaLocation":26,"dataGaName":159},"/small-business/","small business",{"text":161,"config":162},"Public Sector",{"href":163,"dataGaLocation":26,"dataGaName":164},"/solutions/public-sector/","public sector",{"text":166,"config":167},"Pricing",{"href":168,"dataGaName":169,"dataGaLocation":26,"dataNavLevelOne":169},"/pricing/","pricing",{"text":171,"config":172,"link":174,"lists":178,"feature":263},"Resources",{"dataNavLevelOne":173},"resources",{"text":175,"config":176},"View all resources",{"href":177,"dataGaName":173,"dataGaLocation":26},"/resources/",[179,212,235],{"title":180,"items":181},"Getting started",[182,187,192,197,202,207],{"text":183,"config":184},"Install",{"href":185,"dataGaName":186,"dataGaLocation":26},"/install/","install",{"text":188,"config":189},"Quick start guides",{"href":190,"dataGaName":191,"dataGaLocation":26},"/get-started/","quick setup checklists",{"text":193,"config":194},"Learn",{"href":195,"dataGaLocation":26,"dataGaName":196},"https://university.gitlab.com/","learn",{"text":198,"config":199},"Product documentation",{"href":200,"dataGaName":201,"dataGaLocation":26},"https://docs.gitlab.com/","product documentation",{"text":203,"config":204},"Best practice videos",{"href":205,"dataGaName":206,"dataGaLocation":26},"/getting-started-videos/","best practice videos",{"text":208,"config":209},"Integrations",{"href":210,"dataGaName":211,"dataGaLocation":26},"/integrations/","integrations",{"title":213,"items":214},"Discover",[215,220,225,230],{"text":216,"config":217},"Customer success stories",{"href":218,"dataGaName":219,"dataGaLocation":26},"/customers/","customer success stories",{"text":221,"config":222},"Blog",{"href":223,"dataGaName":224,"dataGaLocation":26},"/blog/","blog",{"text":226,"config":227},"The Source",{"href":228,"dataGaName":229,"dataGaLocation":26},"/the-source/","the 
source",{"text":231,"config":232},"Remote",{"href":233,"dataGaName":234,"dataGaLocation":26},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"title":236,"items":237},"Connect",[238,243,248,253,258],{"text":239,"config":240},"GitLab Services",{"href":241,"dataGaName":242,"dataGaLocation":26},"/services/","services",{"text":244,"config":245},"Community",{"href":246,"dataGaName":247,"dataGaLocation":26},"/community/","community",{"text":249,"config":250},"Forum",{"href":251,"dataGaName":252,"dataGaLocation":26},"https://forum.gitlab.com/","forum",{"text":254,"config":255},"Events",{"href":256,"dataGaName":257,"dataGaLocation":26},"/events/","events",{"text":259,"config":260},"Partners",{"href":261,"dataGaName":262,"dataGaLocation":26},"/partners/","partners",{"textColor":264,"title":265,"text":266,"link":267},"#000","What’s new in GitLab","Stay updated with our latest features and improvements.",{"text":268,"config":269},"Read the latest",{"href":270,"dataGaName":271,"dataGaLocation":26},"/releases/whats-new/","whats new",{"text":273,"config":274,"lists":276},"Company",{"dataNavLevelOne":275},"company",[277],{"items":278},[279,284,290,292,297,302,307,312,317,322,327],{"text":280,"config":281},"About",{"href":282,"dataGaName":283,"dataGaLocation":26},"/company/","about",{"text":285,"config":286,"footerGa":289},"Jobs",{"href":287,"dataGaName":288,"dataGaLocation":26},"/jobs/","jobs",{"dataGaName":288},{"text":254,"config":291},{"href":256,"dataGaName":257,"dataGaLocation":26},{"text":293,"config":294},"Leadership",{"href":295,"dataGaName":296,"dataGaLocation":26},"/company/team/e-group/","leadership",{"text":298,"config":299},"Team",{"href":300,"dataGaName":301,"dataGaLocation":26},"/company/team/","team",{"text":303,"config":304},"Handbook",{"href":305,"dataGaName":306,"dataGaLocation":26},"https://handbook.gitlab.com/","handbook",{"text":308,"config":309},"Investor relations",{"href":310,"dataGaName":311,"dataGaLocation":26},"https://ir.gitlab.com/","investor relations",{"text":313,"config":314},"Trust Center",{"href":315,"dataGaName":316,"dataGaLocation":26},"/security/","trust center",{"text":318,"config":319},"AI Transparency Center",{"href":320,"dataGaName":321,"dataGaLocation":26},"/ai-transparency-center/","ai transparency center",{"text":323,"config":324},"Newsletter",{"href":325,"dataGaName":326,"dataGaLocation":26},"/company/contact/#contact-forms","newsletter",{"text":328,"config":329},"Press",{"href":330,"dataGaName":331,"dataGaLocation":26},"/press/","press",{"text":333,"config":334,"lists":335},"Contact us",{"dataNavLevelOne":275},[336],{"items":337},[338,341,346],{"text":33,"config":339},{"href":35,"dataGaName":340,"dataGaLocation":26},"talk to sales",{"text":342,"config":343},"Support portal",{"href":344,"dataGaName":345,"dataGaLocation":26},"https://support.gitlab.com","support portal",{"text":347,"config":348},"Customer portal",{"href":349,"dataGaName":350,"dataGaLocation":26},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":352,"login":353,"suggestions":360},"Close",{"text":354,"link":355},"To search repositories and projects, login to",{"text":356,"config":357},"gitlab.com",{"href":40,"dataGaName":358,"dataGaLocation":359},"search login","search",{"text":361,"default":362},"Suggestions",[363,365,369,371,375,379],{"text":55,"config":364},{"href":60,"dataGaName":55,"dataGaLocation":359},{"text":366,"config":367},"Code Suggestions 
(AI)",{"href":368,"dataGaName":366,"dataGaLocation":359},"/solutions/code-suggestions/",{"text":89,"config":370},{"href":91,"dataGaName":89,"dataGaLocation":359},{"text":372,"config":373},"GitLab on AWS",{"href":374,"dataGaName":372,"dataGaLocation":359},"/partners/technology-partners/aws/",{"text":376,"config":377},"GitLab on Google Cloud",{"href":378,"dataGaName":376,"dataGaLocation":359},"/partners/technology-partners/google-cloud-platform/",{"text":380,"config":381},"Why GitLab?",{"href":68,"dataGaName":380,"dataGaLocation":359},{"freeTrial":383,"mobileIcon":388,"desktopIcon":393,"secondaryButton":396},{"text":384,"config":385},"Start free trial",{"href":386,"dataGaName":31,"dataGaLocation":387},"https://gitlab.com/-/trials/new/","nav",{"altText":389,"config":390},"Gitlab Icon",{"src":391,"dataGaName":392,"dataGaLocation":387},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":389,"config":394},{"src":395,"dataGaName":392,"dataGaLocation":387},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":397,"config":398},"Get Started",{"href":399,"dataGaName":400,"dataGaLocation":387},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/get-started/","get started",{"freeTrial":402,"mobileIcon":406,"desktopIcon":408},{"text":403,"config":404},"Learn more about GitLab Duo",{"href":60,"dataGaName":405,"dataGaLocation":387},"gitlab duo",{"altText":389,"config":407},{"src":391,"dataGaName":392,"dataGaLocation":387},{"altText":389,"config":409},{"src":395,"dataGaName":392,"dataGaLocation":387},{"button":411,"mobileIcon":416,"desktopIcon":418},{"text":412,"config":413},"/switch",{"href":414,"dataGaName":415,"dataGaLocation":387},"#contact","switch",{"altText":389,"config":417},{"src":391,"dataGaName":392,"dataGaLocation":387},{"altText":389,"config":419},{"src":420,"dataGaName":392,"dataGaLocation":387},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1773335277/ohhpiuoxoldryzrnhfrh.png",{"freeTrial":422,"mobileIcon":427,"desktopIcon":429},{"text":423,"config":424},"Back to pricing",{"href":168,"dataGaName":425,"dataGaLocation":387,"icon":426},"back to pricing","GoBack",{"altText":389,"config":428},{"src":391,"dataGaName":392,"dataGaLocation":387},{"altText":389,"config":430},{"src":395,"dataGaName":392,"dataGaLocation":387},{"title":432,"button":433,"config":438},"See how agentic AI transforms software delivery",{"text":434,"config":435},"Watch GitLab Transcend now",{"href":436,"dataGaName":437,"dataGaLocation":26},"/events/transcend/virtual/","transcend event",{"layout":439,"icon":440,"disabled":13},"release","AiStar",{"data":442},{"text":443,"source":444,"edit":450,"contribute":455,"config":460,"items":465,"minimal":672},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":445,"config":446},"View page source",{"href":447,"dataGaName":448,"dataGaLocation":449},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":451,"config":452},"Edit this page",{"href":453,"dataGaName":454,"dataGaLocation":449},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":456,"config":457},"Please contribute",{"href":458,"dataGaName":459,"dataGaLocation":449},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please 
contribute",{"twitter":461,"facebook":462,"youtube":463,"linkedin":464},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[466,513,567,611,638],{"title":166,"links":467,"subMenu":482},[468,472,477],{"text":469,"config":470},"View plans",{"href":168,"dataGaName":471,"dataGaLocation":449},"view plans",{"text":473,"config":474},"Why Premium?",{"href":475,"dataGaName":476,"dataGaLocation":449},"/pricing/premium/","why premium",{"text":478,"config":479},"Why Ultimate?",{"href":480,"dataGaName":481,"dataGaLocation":449},"/pricing/ultimate/","why ultimate",[483],{"title":484,"links":485},"Contact Us",[486,489,491,493,498,503,508],{"text":487,"config":488},"Contact sales",{"href":35,"dataGaName":36,"dataGaLocation":449},{"text":342,"config":490},{"href":344,"dataGaName":345,"dataGaLocation":449},{"text":347,"config":492},{"href":349,"dataGaName":350,"dataGaLocation":449},{"text":494,"config":495},"Status",{"href":496,"dataGaName":497,"dataGaLocation":449},"https://status.gitlab.com/","status",{"text":499,"config":500},"Terms of use",{"href":501,"dataGaName":502,"dataGaLocation":449},"/terms/","terms of use",{"text":504,"config":505},"Privacy statement",{"href":506,"dataGaName":507,"dataGaLocation":449},"/privacy/","privacy statement",{"text":509,"config":510},"Cookie preferences",{"dataGaName":511,"dataGaLocation":449,"id":512,"isOneTrustButton":13},"cookie preferences","ot-sdk-btn",{"title":71,"links":514,"subMenu":523},[515,519],{"text":516,"config":517},"DevSecOps platform",{"href":53,"dataGaName":518,"dataGaLocation":449},"devsecops platform",{"text":520,"config":521},"AI-Assisted Development",{"href":60,"dataGaName":522,"dataGaLocation":449},"ai-assisted development",[524],{"title":525,"links":526},"Topics",[527,532,537,542,547,552,557,562],{"text":528,"config":529},"CICD",{"href":530,"dataGaName":531,"dataGaLocation":449},"/topics/ci-cd/","cicd",{"text":533,"config":534},"GitOps",{"href":535,"dataGaName":536,"dataGaLocation":449},"/topics/gitops/","gitops",{"text":538,"config":539},"DevOps",{"href":540,"dataGaName":541,"dataGaLocation":449},"/topics/devops/","devops",{"text":543,"config":544},"Version Control",{"href":545,"dataGaName":546,"dataGaLocation":449},"/topics/version-control/","version control",{"text":548,"config":549},"DevSecOps",{"href":550,"dataGaName":551,"dataGaLocation":449},"/topics/devsecops/","devsecops",{"text":553,"config":554},"Cloud Native",{"href":555,"dataGaName":556,"dataGaLocation":449},"/topics/cloud-native/","cloud native",{"text":558,"config":559},"AI for Coding",{"href":560,"dataGaName":561,"dataGaLocation":449},"/topics/devops/ai-for-coding/","ai for coding",{"text":563,"config":564},"Agentic AI",{"href":565,"dataGaName":566,"dataGaLocation":449},"/topics/agentic-ai/","agentic ai",{"title":568,"links":569},"Solutions",[570,572,574,579,583,586,590,593,595,598,601,606],{"text":113,"config":571},{"href":108,"dataGaName":113,"dataGaLocation":449},{"text":102,"config":573},{"href":85,"dataGaName":86,"dataGaLocation":449},{"text":575,"config":576},"Agile development",{"href":577,"dataGaName":578,"dataGaLocation":449},"/solutions/agile-delivery/","agile delivery",{"text":580,"config":581},"SCM",{"href":98,"dataGaName":582,"dataGaLocation":449},"source code management",{"text":528,"config":584},{"href":91,"dataGaName":585,"dataGaLocation":449},"continuous integration & delivery",{"text":587,"config":588},"Value stream 
management",{"href":141,"dataGaName":589,"dataGaLocation":449},"value stream management",{"text":533,"config":591},{"href":592,"dataGaName":536,"dataGaLocation":449},"/solutions/gitops/",{"text":151,"config":594},{"href":153,"dataGaName":154,"dataGaLocation":449},{"text":596,"config":597},"Small business",{"href":158,"dataGaName":159,"dataGaLocation":449},{"text":599,"config":600},"Public sector",{"href":163,"dataGaName":164,"dataGaLocation":449},{"text":602,"config":603},"Education",{"href":604,"dataGaName":605,"dataGaLocation":449},"/solutions/education/","education",{"text":607,"config":608},"Financial services",{"href":609,"dataGaName":610,"dataGaLocation":449},"/solutions/finance/","financial services",{"title":171,"links":612},[613,615,617,619,622,624,626,628,630,632,634,636],{"text":183,"config":614},{"href":185,"dataGaName":186,"dataGaLocation":449},{"text":188,"config":616},{"href":190,"dataGaName":191,"dataGaLocation":449},{"text":193,"config":618},{"href":195,"dataGaName":196,"dataGaLocation":449},{"text":198,"config":620},{"href":200,"dataGaName":621,"dataGaLocation":449},"docs",{"text":221,"config":623},{"href":223,"dataGaName":224,"dataGaLocation":449},{"text":216,"config":625},{"href":218,"dataGaName":219,"dataGaLocation":449},{"text":231,"config":627},{"href":233,"dataGaName":234,"dataGaLocation":449},{"text":239,"config":629},{"href":241,"dataGaName":242,"dataGaLocation":449},{"text":244,"config":631},{"href":246,"dataGaName":247,"dataGaLocation":449},{"text":249,"config":633},{"href":251,"dataGaName":252,"dataGaLocation":449},{"text":254,"config":635},{"href":256,"dataGaName":257,"dataGaLocation":449},{"text":259,"config":637},{"href":261,"dataGaName":262,"dataGaLocation":449},{"title":273,"links":639},[640,642,644,646,648,650,652,656,661,663,665,667],{"text":280,"config":641},{"href":282,"dataGaName":275,"dataGaLocation":449},{"text":285,"config":643},{"href":287,"dataGaName":288,"dataGaLocation":449},{"text":293,"config":645},{"href":295,"dataGaName":296,"dataGaLocation":449},{"text":298,"config":647},{"href":300,"dataGaName":301,"dataGaLocation":449},{"text":303,"config":649},{"href":305,"dataGaName":306,"dataGaLocation":449},{"text":308,"config":651},{"href":310,"dataGaName":311,"dataGaLocation":449},{"text":653,"config":654},"Sustainability",{"href":655,"dataGaName":653,"dataGaLocation":449},"/sustainability/",{"text":657,"config":658},"Diversity, inclusion and belonging (DIB)",{"href":659,"dataGaName":660,"dataGaLocation":449},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":313,"config":662},{"href":315,"dataGaName":316,"dataGaLocation":449},{"text":323,"config":664},{"href":325,"dataGaName":326,"dataGaLocation":449},{"text":328,"config":666},{"href":330,"dataGaName":331,"dataGaLocation":449},{"text":668,"config":669},"Modern Slavery Transparency Statement",{"href":670,"dataGaName":671,"dataGaLocation":449},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency 
statement",{"items":673},[674,677,680],{"text":675,"config":676},"Terms",{"href":501,"dataGaName":502,"dataGaLocation":449},{"text":678,"config":679},"Cookies",{"dataGaName":511,"dataGaLocation":449,"id":512,"isOneTrustButton":13},{"text":681,"config":682},"Privacy",{"href":506,"dataGaName":507,"dataGaLocation":449},[684,698,711,723,735,746,758,770,782,793,804],{"id":685,"title":686,"body":6,"category":6,"config":687,"content":691,"description":6,"extension":11,"meta":692,"navigation":13,"path":693,"seo":694,"slug":6,"stem":696,"testContent":6,"type":6,"__hash__":697},"blogCategories/en-us/blog/categories/agile-planning.yml","Agile Planning",{"template":688,"slug":689,"hide":690},"BlogCategory","agile-planning",false,{"name":686},{},"/en-us/blog/categories/agile-planning",{"title":686,"description":695},"Browse articles related to Agile Planning on the GitLab Blog","en-us/blog/categories/agile-planning","qAKNooo2KBxPVkTtNsNSqr_dlCLZaNa-P-NxQ09xvKk",{"id":699,"title":700,"body":6,"category":6,"config":701,"content":703,"description":6,"extension":11,"meta":705,"navigation":13,"path":706,"seo":707,"slug":6,"stem":709,"testContent":6,"type":6,"__hash__":710},"blogCategories/en-us/blog/categories/ai-ml.yml","Ai Ml",{"template":688,"slug":702,"hide":690},"ai-ml",{"name":704},"AI/ML",{},"/en-us/blog/categories/ai-ml",{"title":704,"description":708},"Browse articles related to AI/ML on the GitLab Blog","en-us/blog/categories/ai-ml","rEmbrx2EiZNCvAIStbsvNZ9AHldybfSqbXcgAnBzrUY",{"id":712,"title":713,"body":6,"category":6,"config":714,"content":716,"description":6,"extension":11,"meta":717,"navigation":13,"path":718,"seo":719,"slug":6,"stem":721,"testContent":6,"type":6,"__hash__":722},"blogCategories/en-us/blog/categories/bulletin-board.yml","Bulletin Board",{"template":688,"slug":715,"hide":690},"bulletin-board",{"name":713},{},"/en-us/blog/categories/bulletin-board",{"title":713,"description":720},"Browse articles related to Bulletin Board on the GitLab Blog","en-us/blog/categories/bulletin-board","rokHx9i0d8KJYqVFg8OMZo9FpfzYM2j4RucXtyu1S2o",{"id":724,"title":725,"body":6,"category":6,"config":726,"content":728,"description":6,"extension":11,"meta":729,"navigation":13,"path":730,"seo":731,"slug":6,"stem":733,"testContent":6,"type":6,"__hash__":734},"blogCategories/en-us/blog/categories/customer-stories.yml","Customer Stories",{"template":688,"slug":727,"hide":690},"customer-stories",{"name":725},{},"/en-us/blog/categories/customer-stories",{"title":725,"description":732},"Browse articles related to Customer Stories on the GitLab Blog","en-us/blog/categories/customer-stories","_eXBAvnnvbeX9jN4LsnVoa4W7XrZHvta_MtHKbr-5rM",{"id":736,"title":737,"body":6,"category":6,"config":738,"content":739,"description":6,"extension":11,"meta":740,"navigation":13,"path":741,"seo":742,"slug":6,"stem":744,"testContent":6,"type":6,"__hash__":745},"blogCategories/en-us/blog/categories/devsecops.yml","Devsecops",{"template":688,"slug":551,"hide":690},{"name":548},{},"/en-us/blog/categories/devsecops",{"title":548,"description":743},"Browse articles related to DevSecOps on the GitLab 
Blog","en-us/blog/categories/devsecops","CMEA5RSEzkIxqsAQ42q3nyQxbB-scYg0tjsE5w5w19w",{"id":747,"title":748,"body":6,"category":6,"config":749,"content":751,"description":6,"extension":11,"meta":752,"navigation":13,"path":753,"seo":754,"slug":6,"stem":756,"testContent":6,"type":6,"__hash__":757},"blogCategories/en-us/blog/categories/engineering.yml","Engineering",{"template":688,"slug":750,"hide":690},"engineering",{"name":748},{},"/en-us/blog/categories/engineering",{"title":748,"description":755},"Browse articles related to Engineering on the GitLab Blog","en-us/blog/categories/engineering","8bG3OWoOqnd0RUuGte8_Pd1CHzJY7KSrgk1_B9fzJ8M",{"id":759,"title":760,"body":6,"category":6,"config":761,"content":763,"description":6,"extension":11,"meta":764,"navigation":13,"path":765,"seo":766,"slug":6,"stem":768,"testContent":6,"type":6,"__hash__":769},"blogCategories/en-us/blog/categories/news.yml","News",{"template":688,"slug":762,"hide":690},"news",{"name":760},{},"/en-us/blog/categories/news",{"title":760,"description":767},"Browse articles related to News on the GitLab Blog","en-us/blog/categories/news","IVE63x0_f5y63VT7pAx2RH9p3q2v83g_lRBgr0p4QNo",{"id":771,"title":772,"body":6,"category":6,"config":773,"content":775,"description":6,"extension":11,"meta":776,"navigation":13,"path":777,"seo":778,"slug":6,"stem":780,"testContent":6,"type":6,"__hash__":781},"blogCategories/en-us/blog/categories/open-source.yml","Open Source",{"template":688,"slug":774,"hide":690},"open-source",{"name":772},{},"/en-us/blog/categories/open-source",{"title":772,"description":779},"Browse articles related to Open Source on the GitLab Blog","en-us/blog/categories/open-source","NMRZaCM4ca10TUDhzj6jsX7u5M9zSlzzVFGKbuj2Nz0",{"id":783,"title":71,"body":6,"category":6,"config":784,"content":786,"description":6,"extension":11,"meta":787,"navigation":13,"path":788,"seo":789,"slug":6,"stem":791,"testContent":6,"type":6,"__hash__":792},"blogCategories/en-us/blog/categories/product.yml",{"template":688,"slug":785,"hide":690},"product",{"name":71},{},"/en-us/blog/categories/product",{"title":71,"description":790},"Browse articles related to Product on the GitLab Blog","en-us/blog/categories/product","JCTE8LgoP8oKfWUAM0467yFaiUZ-vxUCM7p6ejl4WTM",{"id":794,"title":104,"body":6,"category":6,"config":795,"content":797,"description":6,"extension":11,"meta":798,"navigation":13,"path":799,"seo":800,"slug":6,"stem":802,"testContent":6,"type":6,"__hash__":803},"blogCategories/en-us/blog/categories/security.yml",{"template":688,"slug":796,"hide":690},"security",{"name":104},{},"/en-us/blog/categories/security",{"title":104,"description":801},"Browse articles related to Security on the GitLab Blog","en-us/blog/categories/security","Hx58KagneyLDkWgUOsPQNGCsWqekf9YGQa6EJFfGFRw",{"id":805,"title":806,"body":6,"category":6,"config":807,"content":809,"description":6,"extension":11,"meta":811,"navigation":13,"path":812,"seo":813,"slug":6,"stem":815,"testContent":6,"type":6,"__hash__":816},"blogCategories/en-us/blog/categories/security-labs.yml","Security Labs",{"template":688,"isCustomCategory":13,"slug":808,"hide":690},"security-labs",{"name":806,"description":810},"Learn about cybersecurity trends, best practices, and third-party threats to secure your code and digital infrastructure.",{},"/en-us/blog/categories/security-labs",{"title":806,"description":814},"Browse articles related to Security Labs on the GitLab 
Blog","en-us/blog/categories/security-labs","R7W9jD38ydCqWBR5-wSYze-Orc17_eSeMP_60gUwCVg",[818,862,900,938,979,1017,1054,1093,1130,1166,1202],{"category":686,"slug":689,"posts":819},[820,835,848],{"content":821,"config":832},{"body":822,"category":689,"tags":823,"date":826,"title":827,"description":828,"authors":829,"heroImage":831},"GitLab's Agile planning experience is getting a significant upgrade. Starting in GitLab 18.10, the new work items list and saved views bring together two long-requested capabilities: one list that displays all work item types together, and saved views that let you store and return to customized list configurations.\n\nThese capabilities help save time and effort by:\n\n* Eliminating repetitive filter setup for common workflows  \n* Ensuring consistency in how teams view and assess work  \n* Facilitating standardized reporting and status checks\n\n## What are work items?\n\nPreviously, epics and issues lived on separate list pages, requiring users to navigate between them. The work items list combines epics, issues, and other work items into a single, unified list experience, eliminating the need to switch between separate pages for different work item types.\n\nThis is also the foundation for deeper planning capabilities coming in the future. Bringing all work item types into one place paves the way for hierarchy views (like a Table view) that will make it easier to visualize relationships and structure across epics, issues, and other items at a glance.\n\nBeyond list and hierarchy views, we also plan to consolidate other common workflows, like Boards, into this unified experience. The result: all of your essential planning views in one place, shareable with your team through saved views, without needing to navigate across different parts of the product.\n\nYou may be wondering why we call these \"work items\" rather than issues. The short answer is that \"issue\" doesn't scale to where we're going. Soon, you'll be able to fully configure your work item types, including their names, to match your organization's planning hierarchy. Locking the experience to legacy naming would work against that flexibility. \"Work items\" is the foundation for a model you can make your own.\n\n![Work items list view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1774028606/ae9ugijwjsyv3ktiks0n.png)\n\n## What led to the change to work items?\n\nIn 2024, we shared our vision for a [new Agile planning experience in GitLab](https://about.gitlab.com/blog/first-look-the-new-agile-planning-experience-in-gitlab/), powered by the work items framework. That post outlined the core problem: Epics and issues existed as separate experiences, creating friction for teams who expected consistent functionality across planning objects. The work items framework was our answer — a unified architecture designed to deliver consistency and unlock new capabilities across GitLab's planning tools. Work items list and saved views are a step in that journey.\n\n## What are saved views?\n\nSaved views allow users to save and return to customized list configurations, including filters, sort order, and display options. 
The goal is to make routine checks more efficient and to support consistent, standardized ways of viewing work across a team.\n\n![Saved view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1774028606/izmg27ckskpkdofgvonr.png)\n\n## What's next\n\nTo understand why we are making the changes we are, it helps to picture where we're headed.\n\nThe goal isn't just a work items list; it's a planning experience that lets you move fluidly between different types of views (list, board, table, and more) while retaining your current filter scope.\n\nPair that with saved views, and you can create a dedicated view for each of your workflows: iteration planning, backlog refinement, portfolio-level planning with nested table views, and more.\n\nEach view is ready to go, consistent in how it filters and displays work, and shareable with your team. This framework also sets the stage for more powerful capabilities down the road, including full swimlane support for any work item attribute in boards. \n\nWe know that changes to the tools you use every day can be disruptive. If you've built workflows around the existing epic and issue list pages, this will look and feel different. That's not something we take lightly.\n\nThis direction wasn't a decision we made quickly. It reflects years of feedback, a significant architectural investment in the work items framework, and a genuine belief that a unified experience will serve teams better in the long run. We expect the transition to take some adjustment, and we'll continue to iterate based on what we hear from you!\n\n## Share your feedback\nWe encourage you try these new capabilities. Then, please reach out about your work items list and saved views experience in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/work_items/590689). Your comments will help us further improve these capabilities.",[824,825,785],"agile","features","2026-03-23","Agile planning gets a boost from new features in GitLab 18.10","Work items list and saved views reduce context switching, keeping your software development team aligned and their workflows efficient.",[830],"Matthew Macfarlane","https://res.cloudinary.com/about-gitlab-com/image/upload/v1773843921/rm35fx4gylrsu9alf2fx.png",{"featured":13,"template":833,"slug":834},"BlogPost","agile-planning-gets-a-boost-from-new-features-in-gitlab-18-10",{"content":836,"config":846},{"title":837,"description":838,"heroImage":839,"date":840,"body":841,"category":689,"tags":842,"authors":843},"Ace your planning without the context-switching","Learn how GitLab Duo Planner Agent simplifies tasks and saves time by helping product and engineering managers focus on the work that matters most.\n\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098354/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_5XrohmuWBNuqL89BxVUzWm_1750098354056.png","2025-10-28","Software development teams face a challenging balancing act: dozens of tasks, limited time, and constant pressure to pick the right thing to work on next. \n\nThe planning overhead of structuring requirements, managing backlogs, tracking delivery, and writing status updates steals hours from strategic thinking. \n\nThe result? 
Less time for the high-value decisions that actually drive products forward.\n\nThat’s why we developed [GitLab Duo Planner](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/planner/), an AI agent built on [GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo-agent-platform/) to support product managers directly within GitLab.\n\nGitLab Duo Planner isn't another generic AI assistant. GitLab's product and engineering teams, who live these challenges daily like many of our customers, purpose-built GitLab Duo Planner to orchestrate planning workflows and reduce overhead while improving alignment and predictability.\n\n## Your new planning teammate\n\nToday’s planning workflows face three major problems:\n\n1. Prone to drift - Unplanned and orphaned work reduces trust in the plan.  \n2. Disruptive to developers - Constant interruptions for status updates break flow.  \n3. Opaque - Hidden risks surface too late to course-correct.\n\nTransforming the way teams work, GitLab Duo Planner cuts manual overhead. Turn vague ideas into structured requirements in minutes. Surface hidden backlog problems before they derail sprints. Apply RICE and MoSCoW frameworks instantly to make confident prioritization decisions. With awareness of GitLab context across the platform, every interaction with GitLab Duo Planner saves time and improves decision quality. This is possible because of the foundational agent architecture, bringing deep domain expertise and context awareness specific to GitLab.\n\n## Built for teams\n\nGitLab Duo Planner leverages work items (epics, issues, tasks) and understands the nuances of work breakdown structures, dependency analysis, and effort estimation, making it well positioned to improve visibility, alignment, and confidence in delivery.\n\n* Platform approach - Unlike point solutions, Duo Planner orchestrates across your entire GitLab platform, from planning through development and testing, driving visibility across teams and workflows. \n\n* Embedded in the flow - No more context-switching between tools or diving deep into GitLab to retrieve information. Duo Planner enables contributions, collaboration, and transparency from users across the software development lifecycle. 
\n\n* Saves time and effort - Use Duo Planner to free your teams from repetitive coordination work, improving delivery predictability, reducing missed commitments while bringing in focus on what actually moves the needle.\n\n## From chaos to clarity\n\nGitLab Duo Planner can help at different stages of software planning and delivery while operating within the planning scope, providing a safe, bounded environment with project visibility.\n\nThe agent can help with six flows:\n\n* Prioritization - Apply frameworks like RICE, MoSCoW, or WSJF to rank work items intelligently\n\n* Work breakdown - Decompose initiatives into epics, features, and user stories to structure requirements\n\n* Dependency analysis - Identify blocked work and understand relationships between items to maintain velocity\n\n* Planning -  Organize sprints, milestones, or quarterly planning \n\n* Status reporting -  Generate summaries of project progress, risks, and blockers to track delivery\n\n* Backlog management -  Identify stale issues, duplicates, or items needing refinement to improve data hygiene\n\n\nHere is an example how GitLab Duo Planner can check the status of an initiative:\n\n\u003Cdiv>\u003Ciframe src=\"https://player.vimeo.com/video/1131065078?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo Planner Agent\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\nDuo Planner is available as a custom agent in the Duo Chat side panel, with the current page context.\n\n\u003Cp>\u003C/p>\n\n![Duo Planner as a custom agent in the Duo Chat side panel](https://res.cloudinary.com/about-gitlab-com/image/upload/v1761323689/ener1mkyj9shg6zvtp4f.png)\n\n\u003Cp>\u003C/p>\n\nLet’s ask Duo Planner about the status of an initiative by providing the epic link:\n\n![Asking Duo Planner about the status of an initiative by providing the epic link](https://res.cloudinary.com/about-gitlab-com/image/upload/v1761323689/gzv2xudegtjhtesz1oaz.png)\n\n\u003Cp>\u003C/p>\n\nWe receive a structured summary with an overview, current status of milestones, in-progress items, dependencies, and blockers, along with actionable recommendations.\n\n![Structured summary](https://res.cloudinary.com/about-gitlab-com/image/upload/v1761323690/guoyqe1b9bstmbjzunez.png)\n\n\u003Cp>\u003C/p>\n\nNext, let’s ask for an executive summary to share with stakeholders:\nGitLab Duo Planner eliminates hours of manual analysis and reporting effort, helping to make decisions faster and keep all stakeholders updated.\n\n![Ask for executive summary](https://res.cloudinary.com/about-gitlab-com/image/upload/v1761323689/xs9zxawqrytfu54ejx2b.png)\n\n\n\u003Cp>\u003C/p>\n\n![Output of executive summary](https://res.cloudinary.com/about-gitlab-com/image/upload/v1761323690/bsbpvjaqnymobzg4knhu.png)\n\n\u003Cp>\u003C/p>\n\nHere are a few more prompts you can try with GitLab Duo Planner:\n\n* “Which of the bugs with a “boards” label should we fix first, considering user impact?”  \n* “Rank these epics by strategic value for Q1.”  \n* “Help me prioritize technical debt against new features.”  \n* “What tasks are needed to implement this user story?”  \n* “Suggest a phased approach for this project: (insert URL).”\n\n## What's next\n\nGitLab Duo Planner 
focuses intentionally on product managers and engineering managers working in Agile environments. Why? Because specificity drives performance. By training Duo Planner deeply on GitLab's planning workflows and Agile frameworks, we deliver reliable, actionable insights rather than generic suggestions.\n\nAs we evolve the platform, we envision a family of specialized agents, each optimized for specific workflows while contributing to a unified intelligence layer. Today's planner for software teams is just the beginning of how AI will transform work prioritization across all teams.\n\n> If you’re an existing GitLab customer and would like to try GitLab Duo Planner with a prompt of your own, visit our [documentation](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/planner/) where we cover prerequisites, use cases, and more.",[704,824,825,785],[844,845],"Aathira Nair","Amanda Rueda",{"featured":13,"template":833,"slug":847},"ace-your-planning-without-the-context-switching",{"content":849,"config":860},{"title":850,"description":851,"authors":852,"date":855,"body":856,"category":689,"tags":857,"heroImage":859},"Embedded views: The future of work tracking in GitLab","Learn how embedded views, powered by GitLab Query Language, help GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows.",[830,853,854],"Himanshu Kapoor","Alex Fracazo","2025-08-21","Ever find yourself switching between tabs in GitLab just to keep track of what’s happening in your project? Maybe you’re checking on an issue, then jumping to a merge request, then over to an epic to see how everything connects. Before you know it, you’ve got a browser full of tabs and you’ve lost your train of thought.\n\nIf that sounds familiar, you’re definitely not alone. So many teams waste time and energy flipping through various items in their project management software, just trying to get a handle on their work.\n\nThat's why we created [embedded views](https://docs.gitlab.com/user/glql/#embedded-views), powered by [GitLab Query Language (GLQL)](https://docs.gitlab.com/user/glql/). With embedded views, [available in 18.3](https://docs.gitlab.com/releases/18/gitlab-18-3-released/), you get live, relevant information right where you’re already working in GitLab. No more endless context switching. No more outdated reports. Just the info you need, right when you need it.\n## Why embedded views matter\nEmbedded views are more than just a new feature, they're a fundamental shift in how teams understand and track their work within GitLab. With embedded views, teams can maintain context while accessing real-time information, creating shared understanding, and improving collaboration without ever leaving their current workflow. It’s about making work tracking feel natural and effortless, so you can focus on what matters.\n## How it works: Real-time data right where you need it the most\nEmbedded views let you insert live GLQL queries in Markdown code blocks throughout wiki pages, epics, issues, and merge requests. Here's what makes them so useful:\n### Always up to date\nGLQL queries are dynamic, pulling fresh data each time the page loads, so your embedded views always reflect the current state of your work, not the state when you embedded the view. 
When changes happen to issues, merge requests, or milestones, a page refresh will show those updates in your embedded view.\n### Contextual awareness\nUse functions like `currentUser()` and `today()` to make queries context-specific. Your embedded views automatically adapt to show relevant information for whoever is viewing them, creating personalized experiences without manual configuration.\n### Powerful filtering\nFilter by fields like assignee, author, label, milestone, health status, creation date, and more. Use logical expressions to get exactly the data you want. We support more than 30 fields as of 18.3.\n### Customizable display\nYou can display your data as a table, a list, or a numbered list. Choose which fields to show, set a limit on the number of items, and specify the sort order to keep your view focused and actionable.\n### Availability\nYou can use embedded views in group and project wikis, epic and issue descriptions, merge requests, and comments. GLQL is available across all GitLab tiers: Free, Premium, and Ultimate, on GitLab.com, GitLab Self-Managed, and GitLab Dedicated. Certain functionality, such as displaying epics, status, custom fields, iterations, and weights, is available in the Premium and Ultimate tiers. Displaying health status is available only in Ultimate.\n## See embedded views in action\nThe syntax of an embedded view's source is a superset of YAML that consists of:\n- The `query` parameter: Expressions joined together with a logical operator, such as `and`.\n- Parameters related to the presentation layer, like `display`, `limit`, or `fields`, `title`, and `description`\n  represented as YAML.\n\nA view is defined in Markdown as a code block, similar to other code blocks like Mermaid.\nFor example:\n- Display a table of first 5 open issues assigned to the authenticated user in `gitlab-org/gitlab`.\n- Display columns `title`, `state`, `health`, `description`, `epic`, `milestone`, `weight`, and `updated`.\n````markdown\n```glql\ndisplay: table\ntitle: GLQL table 🎉\ndescription: This view lists my open issues\nfields: title, state, health, epic, milestone, weight, updated\nlimit: 5\nquery: project = \"gitlab-org/gitlab\" AND assignee = currentUser() AND state = opened\n```\n````\nThis source should render a table like the one below:\n![](https://res.cloudinary.com/about-gitlab-com/image/upload/v1755193172/ibzfopvpztpglnccwrjj.png)\n\nAn easy way to create your first embedded view is to navigate to the **More options** dropdown in the rich text editor toolbar. Once in this toolbar, select **Embedded view**, which populates the following query in a Markdown code block:\n````markdown\n```glql\nquery: assignee = currentUser()\nfields: title, createdAt, milestone, assignee\ntitle: Issues assigned to current user\n```\n````\nSave your changes to the comment or description where the code block appears, and you're done! You've successfully created your first embedded view!\n## How GitLab uses embedded views\nWhether tracking merge requests targeting security releases, triaging bugs to improve backlog hygiene, or managing team onboarding and milestone planning, we rely on embedded views for mission-critical processes every day. This isn't just a feature we built, it's a tool we depend on to run our business effectively. When you adopt embedded views, you're getting a tested solution that's already helping GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows. 
Simply stated, embedded views can transform how your team accesses and analyzes the work that matters most to your success.\n\nTo learn and see more about how GitLab is using embedded views internally, check out [How GitLab measures Red Team impact: The adoption rate metric](https://about.gitlab.com/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric/), and Global Search Release Planning issues for the [18.1](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/239), [18.2](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/241), and [18.3](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/245) milestones.\n## What's next\n[Embedded views](https://docs.gitlab.com/user/glql/) are just the start of Knowledge Group's vision for work tracking. Learn more about what we're focusing on next in the [embedded views post-GA epic](https://gitlab.com/groups/gitlab-org/-/epics/15249). As embedded views evolve we're committed to making them even more powerful and [accessible](https://gitlab.com/gitlab-org/gitlab/-/issues/548722).\n## Share your experience\nShare your feedback in the [embedded views GA feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/509792) or via the [embedded views GA survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_6PFhgZMBA06kr7E). Whether you've discovered innovative use cases, encountered challenges, or have ideas for improvements, we want to hear from you.\n",[824,516,858],"workflow","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099072/Blog/Hero%20Images/Blog/Hero%20Images/agile_agile.png_1750099072322.png",{"featured":690,"template":833,"slug":861},"embedded-views-the-future-of-work-tracking-in-gitlab",{"category":704,"slug":702,"posts":863},[864,876,889],{"content":865,"config":874},{"title":866,"description":867,"authors":868,"heroImage":870,"date":871,"body":872,"category":702,"tags":873},"GitLab and Anthropic: Governed AI for enterprise development","GitLab deepens its Anthropic Claude integration, bringing governed AI, access to new models, and cloud flexibility to enterprise software development.",[869],"Stuart Moncada","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776457632/llddiylsgwuze0u1rjks.png","2026-04-28","For enterprise and public sector leaders, the tension is familiar: Software teams need to move faster with AI, while security, compliance, and regulatory expectations only get more stringent. GitLab deepens its Anthropic Claude integration so organizations get access to newly released Claude models inside GitLab’s intelligent orchestration platform where governance, compliance, and auditability already run.\n\nClaude powers capabilities across GitLab Duo Agent Platform as the default model out of the box, across a variety of use cases from code generation and review to agentic chat and vulnerability resolution. If you've used GitLab Duo, you've already experienced how Duo agents automate workflows across the entire software development lifecycle (SDLC).\n\nThis accelerates the integration of Claude’s capabilities into GitLab, broadens how enterprises can deploy them, and reinforces what makes GitLab fundamentally different as a platform for software development and engineering: governance, compliance, and auditability built into every AI interaction.\n\n> \"GitLab Duo has accelerated how our teams plan, build, and ship software. 
The combination of Anthropic's Claude and GitLab's platform means we're getting more capable AI without changing how we work or how it is governed.\"\n>\n> – Mans Booijink, Operations Manager, Cube\n\n## The real differentiator: Governed AI\n\nWith GitLab, governance controls and auditing are built into the SDLC. When Claude suggests a code change through the GitLab Duo Agent Platform, that suggestion flows through the same merge request process, the same approval rules, the same security scanning, and the same audit trail as every other change. AI doesn't get a shortcut around your controls. It operates within them.\n\nAs GitLab moves deeper into agentic software development, where AI autonomously handles well-defined tasks, the governance layer becomes more important. An AI agent that can open a merge request, help resolve a vulnerability, or refactor a service needs to be auditable, attributable, and subject to the same policy enforcement as a human developer. That requirement is an architectural decision GitLab made from the start, and one that grows more consequential as AI agents take on broader responsibilities.\n\n## Enterprise deployment flexibility\n\nThis also expands how organizations access the latest Claude models through GitLab. Claude is available within GitLab through Google Cloud's Vertex AI and Amazon Bedrock, which means enterprises can route AI workloads through the hyperscaler commitments and cloud governance frameworks they already have in place. No separate vendor contract. No new data residency questions. Your existing Google Cloud or AWS relationship is the on-ramp. \n\nGitLab is now also available in the [Claude Marketplace](https://claude.com/platform/marketplace), allowing customers to purchase GitLab Credits and apply them toward existing Anthropic spending commitments – consolidating AI spend and simplifying how teams discover and procure GitLab alongside their Anthropic investments.\n\n## Advancing an agentic future\n\nGitLab's vision for agentic software development, where AI handles defined tasks autonomously across planning, coding, testing, securing, and deploying, requires models with strong reasoning, reliability, and safety characteristics. It also requires a platform where those autonomous actions are fully governed.\n\nAgentic workflows demand models with strong reasoning, reliability, and safety characteristics, criteria that guide how GitLab selects and integrates AI model partners. And GitLab's governance framework helps ensure that as AI agents assume more advanced development work, enterprises maintain full visibility and control over what those agents do, when they do it, and how changes are tracked.\n\n## What this means for GitLab customers\n\nIf you're already using GitLab Duo Agent Platform, you'll get access to Claude models and deeper AI assistance across your software development lifecycle, all within the governance framework you already rely on.\n\nIf you're evaluating AI-powered software development platforms, you shouldn't have to choose between advanced AI capabilities and enterprise control. This strategic integration is built to deliver both.\n\n> Want to learn more about GitLab Duo Agent Platform? 
[Get a demo or start a free trial today](https://about.gitlab.com/gitlab-duo-agent-platform/).",[704,785,262],{"featured":13,"template":833,"slug":875},"gitlab-and-anthropic-governed-ai-for-enterprise-development",{"content":877,"config":887},{"title":878,"description":879,"authors":880,"heroImage":882,"date":883,"body":884,"category":702,"tags":885},"Give your AI agent direct, structured GitLab access with glab CLI","The GitLab CLI (glab) provides AI agents structured, reliable access to projects via the MCP, eliminating friction. This tutorial shows how you can speed up code review and issue triage.",[881],"Kai Armstrong","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776347152/unw3mzatkd5xyfbzcnni.png","2026-04-27","\nWhen teams use GitLab Duo, Claude, Cursor, and other AI assistants, more of the development workflow runs through an AI agent acting on your behalf — reading issues, reviewing merge requests, running pipelines, and helping you ship faster. Most developers are already using the GitLab CLI (`glab`) from the terminal to interact with GitLab. Combining the two is a natural next step.\n\n\nThe problem is that without the right tools, AI agents are essentially guessing when it comes to your GitLab projects. They might hallucinate the details of an issue they've never seen, summarize a merge request based on stale training data rather than its actual state, or require you to manually copy context from a browser tab and paste it into a chat window just to get started. Every one of those workarounds is friction: it slows you down, introduces the possibility of error, and puts a hard ceiling on what your agent can actually do on your behalf. `glab` changes that by giving agents a direct, reliable interface to your projects.\n\n\nWith `glab`, your agent fetches what it needs directly from GitLab, acts on it, and reports back — so you spend less time relaying information and more time on the work that matters.\n\n\nIn this tutorial, you'll learn how to use `glab` to give AI agents structured, reliable access to your GitLab projects. You'll also discover how that unlocks a faster, more capable development workflow.\n\n\n## How to connect your AI agent to GitLab through MCP\n\n\nThe most direct way to supercharge your AI workflow is to give your AI agent native access to `glab` through Model Context Protocol ([MCP](https://about.gitlab.com/topics/ai/model-context-protocol/)).\n\n\n MCP is an open standard that lets AI tools discover and use external capabilities at runtime. Once connected, your AI assistant can read issues, comment on merge requests, check pipeline status, and write back to GitLab, all without copying anything from the UI or writing a single API call yourself.\n\n\n To get started, run:\n\n\n ```shell\n # Start the glab MCP server\n glab mcp serve\n ```\n\n\n Once your MCP client is configured, your AI can answer questions like *\"What's the status of my open MRs?\"* or *\"Are there any failing pipelines on main?\"* by querying GitLab directly, not scraping the web UI, not relying on stale training data. See the [full setup docs](https://docs.gitlab.com/cli/) for configuration steps for Claude Code, Cursor, and other editors.\n\n\n One detail worth knowing: `glab` automatically adds `--output json` when invoked through MCP, for any command that supports it. Your agent gets clean, structured data without you needing to think about output formats. 
And because `glab` uses the official MCP SDK, it stays compatible as the\n protocol evolves.\n\n\n We've also been deliberate about *which* commands are exposed through MCP. Commands that require interactive terminal input are intentionally\n excluded, so your agent never gets stuck waiting for input that will never come. What's exposed is what actually works reliably in an agent context.\n\n\n ## Let your AI participate in code review\n\n\n Most developers have a backlog of MRs waiting for review. It's one of the most time-consuming parts of the job and one of the best places to put\n AI to work. With `glab`, your agent doesn't just observe your review queue, it can work through it with you.\n\n\n ### See exactly what still needs addressing\n\n\n Start with this:\n\n\n ```shell\n glab mr view 2677 --comments --unresolved --output json\n ```\n\n\n This input returns the full MR: metadata, description, and every\n unresolved discussion, as a single structured JSON payload. Hand that to\n your AI and it has everything it needs: which threads are open, what the\n reviewer asked for, and in what context. No tab-switching, no copy-pasting\n individual comments.\n\n\n \n ```json\n {\n   \"id\": 2677,\n   \"title\": \"feat: add OAuth2 support\",\n   \"state\": \"opened\",\n   \"author\": { \"username\": \"jdwick\" },\n   \"labels\": [\"backend\", \"needs-review\"],\n   \"blocking_discussions_resolved\": false,\n   \"discussions\": [\n     {\n       \"id\": \"3107030349\",\n       \"resolved\": false,\n       \"notes\": [\n         {\n           \"author\": { \"username\": \"dmurphy\" },\n           \"body\": \"This error handling will swallow panics — consider wrapping with recover()\",\n           \"created_at\": \"2026-03-14T09:23:11.000Z\"\n         }\n       ]\n     },\n     {\n       \"id\": \"3107030412\",\n       \"resolved\": false,\n       \"notes\": [\n         {\n           \"author\": { \"username\": \"sreeves\" },\n           \"body\": \"Token refresh logic needs a test for the expired token case\",\n           \"created_at\": \"2026-03-14T10:05:44.000Z\"\n         }\n       ]\n     }\n   ]\n }\n ```\n\n\n Instead of reading through every thread yourself, you ask your agent  *\"what do I still need to fix in MR 2677?\"* and get back a prioritized summary with suggested changes. This all happens from a single command.\n\n\n ### Close the loop programmatically\n\n\n Once your AI has helped you address the feedback, it can resolve\n discussions:\n\n\n ```shell\n # List all discussions — structured, ready for the agent to process\n glab mr note list 456 --output json\n\n # Resolve a discussion once the feedback is addressed\n glab mr note resolve 456 3107030349\n\n # Reopen if something needs another look\n glab mr note reopen 456 3107030349\n ```\n\n\n\n ```json\n [\n   {\n     \"id\": 3107030349,\n     \"body\": \"This error handling will swallow panics — consider wrapping with recover()\",\n     \"author\": { \"username\": \"dmurphy\" },\n     \"resolved\": false,\n     \"resolvable\": true\n   },\n   {\n     \"id\": 3107030412,\n     \"body\": \"Token refresh logic needs a test for the expired token case\",\n     \"author\": { \"username\": \"sreeves\" },\n     \"resolved\": false,\n     \"resolvable\": true\n   }\n ]\n ```\n\n\n\n Note IDs are visible directly in the GitLab UI and API, no extra lookup needed. 
Your agent can work through the full list, verify each fix, and resolve as it goes.\n\n\n## Talk to your AI about your code more effectively\n\n\nEven if you're not running an MCP server, there's a simpler shift that makes a huge difference: using `glab` to feed your AI better information.\n\n\nThink about the last time you asked an AI assistant to help triage issues or debug a failing pipeline. You probably copied some text from the GitLab UI and pasted it into the chat. Here's what your agent is actually working with when you do that:\n\n\n```text\nopen issues: 12 • milestone: 17.10 • label: bug, needs-triage ...\n```\n\n\nCompare that to what it gets with `glab`:\n\n\n```json\n[\n  {\n    \"iid\": 902,\n    \"title\": \"Pipeline fails on merge to main\",\n    \"labels\": [\"bug\", \"needs-triage\"],\n    \"milestone\": { \"title\": \"17.10\" },\n    \"assignees\": []\n  },\n  ...\n]\n```\n\n\nStructured, typed, complete; no ambiguity, no parsing guesswork. That's the difference between an agent that can act and one that has to ask follow-up questions.\n\n\nIf you're using the MCP server, you get this automatically: `glab` adds `--output json` for any command that supports it. If you're working directly from the terminal, just add the flag yourself:\n\n\n```shell\n# Pull open issues for triage\nglab issue list --label \"needs-triage\" --output json\n\n# Check pipeline status\nglab ci status --output json\n\n# Get full MR details\nglab mr view 456 --output json\n```\n\n\nWe've significantly expanded JSON output support in recent releases. It now covers CI status, milestones, labels, releases, schedules, cluster agents, work items, MR approvers, repo contributors, and more. If `glab` can retrieve it, your AI can consume it cleanly.\n\n\n### A real workflow\n\n\n```shell\nglab issue list --label \"needs-triage\" --milestone \"17.10\" --output json\n```\n\n\n```text\nAgent: I found 2 unassigned bugs in the 17.10 milestone that need triage:\n1. #902 — Pipeline fails on merge to main (opened 5 days ago)\n2. #903 — Auth token not refreshing on expiry (opened 4 days ago)\nBoth are unassigned. Want me to draft triage notes and suggest assignees based on recent commit history?\n```\n\n\n## Your agent is never limited to built-in commands\n\n\n`glab`'s first-class commands cover the most common workflows, but your agent is never limited to them. Through `glab api`, it has authenticated access to the full GitLab REST and GraphQL API surface, using the same session, with no extra credentials or configuration required.\n\n\nThis is a meaningful differentiator. Most CLI tools stop at what their commands expose. With `glab`, if GitLab's API supports it, your agent can do it.
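\n\n\nA quick way to see this in action (a minimal sketch; `/user` is GitLab's standard REST endpoint for the currently authenticated user):\n\n```shell\n# Confirm which identity the agent is acting as, straight from the REST API\nglab api \"/user\" | jq .username\n```\n\n\n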
It's always working from a trusted, authenticated context.\n\n\nA practical example: fetching just the list of changed files in an MR before deciding which diffs to pull in full:\n\n\n```shell\n# Get changed file paths — lightweight, no diff content yet\nglab api \"/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/diffs?per_page=100\" \\\n  | jq '.[].new_path'\n\n# Then fetch only the specific file your agent needs\nglab api \"/projects/$CI_PROJECT_ID/merge_requests/$CI_MERGE_REQUEST_IID/diffs?per_page=100\" \\\n  | jq '.[] | select(.new_path == \"path/to/file.go\")'\n```\n\n\n```text\n\"internal/auth/token.go\"\n\"internal/auth/token_test.go\"\n\"internal/oauth/refresh.go\"\n```\n\n\nFor anything the REST API doesn't cover (epics, certain work item queries, complex cross-project data), `glab api graphql` gives you the full GraphQL interface:\n\n\n```shell\nglab api graphql -f query='\n{\n  project(fullPath: \"gitlab-org/gitlab\") {\n    mergeRequest(iid: \"12345\") {\n      title\n      reviewers { nodes { username } }\n    }\n  }\n}'\n```\n\n\n```json\n{\n  \"data\": {\n    \"project\": {\n      \"mergeRequest\": {\n        \"title\": \"feat: add OAuth2 support\",\n        \"reviewers\": {\n          \"nodes\": [\n            { \"username\": \"dmurphy\" },\n            { \"username\": \"sreeves\" }\n          ]\n        }\n      }\n    }\n  }\n}\n```\n\n\nYour agent has a single, authenticated entry point to everything GitLab exposes, without token juggling, separate API clients, or configuration overhead.\n\n\n## What's coming and how to give feedback\n\n\nTwo improvements we're actively working on will make `glab` even more useful for agent workflows:\n\n\n**Agent-aware help text.** Today, `--help` output is written for humans at a terminal. We're updating it to surface the non-interactive alternative for every interactive command, flag which commands support `--output json`, and generally make help a useful resource for agents discovering capabilities at runtime — not just humans.\n\n\n**Better machine-readable errors.** When something goes wrong today, agents get the same human-readable error messages as terminal users. We're changing that so errors in JSON mode return structured output, giving your agent the information it needs to handle failures gracefully, retry intelligently, or surface the right context back to you.\n\n\nBoth of these are in active development. If you're already using `glab` with an AI tool, you're exactly the audience we want feedback from.\n\n\n* **What friction are you hitting?** Commands that don't behave well in agent contexts, error messages that aren't actionable, gaps in JSON output coverage. We want to know.\n\n* **What workflows have you unlocked?** Real usage patterns help us prioritize what to build next.\n\n\nJoin the discussion in [our feedback issue](https://gitlab.com/gitlab-org/cli/-/issues/8177) — that's where we're shaping the roadmap for agent-friendliness, and where your input will have the most direct impact. If you've found a specific gap, [open an issue](https://gitlab.com/gitlab-org/cli/-/issues/new). If you've got a fix in mind, contributions are welcome. Visit [CONTRIBUTING.md](https://gitlab.com/gitlab-org/cli/-/blob/main/CONTRIBUTING.md) to get started.\n\n\nThe GitLab CLI has always been about giving developers more control over their workflow.
As AI becomes a bigger part of how we all work, that means making `glab` the best possible interface between your AI tools and your GitLab projects. We're just getting started and we'd love to build the next part with you.\n",[704,785,886],"tutorial",{"featured":13,"template":833,"slug":888},"give-your-ai-agent-direct-structured-gitlab-access-with-glab-cli",{"content":890,"config":898},{"title":891,"description":892,"authors":893,"heroImage":882,"date":895,"body":896,"category":702,"tags":897},"GitHub Copilot's new policy for AI training is a governance wake-up call","Learn what GitHub's Copilot policy change means for regulated industries, and why GitLab's commitment to customer data privacy matters.",[894],"Allie Holland","2026-04-20","GitHub recently [announced](https://github.blog/news-insights/company-news/updates-to-github-copilot-interaction-data-usage-policy/) a significant change to how it handles data from Copilot users. Starting April 24, 2026, interaction data from Copilot Free, Pro, and Pro+ users, including inputs, outputs, code snippets, and associated context, will be used to train AI models by default, unless users actively opt out. Copilot Business and Enterprise customers are exempt under existing contract terms.\n\nFor organizations in regulated industries, including finance, healthcare, defense, and public sector, the policy shift raises questions that go beyond individual developer preferences. It forces a harder look at a question that engineering and security leaders should be asking every AI vendor in their stack: Do you train on our code? \n\nGitLab's answer is no. GitLab does not train AI models on customer code at any tier, and AI vendors are contractually prohibited from using customer inputs or outputs for their own purposes. The [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) makes that commitment auditable: a single location documenting which models power which features, how data is handled, subprocessor relationships, and data retention periods. The GitLab AI Transparency Center also lists the compliance status of each feature, including confirmation that GitLab's current AI features do not qualify as high-risk systems under the EU AI Act. It's a standard GitLab CEO Bill Staples has consistently [reiterated](https://www.linkedin.com/posts/williamstaples_gitlab-1810-agentic-ai-now-open-to-even-activity-7443280763715985408-aHxf?utm_source=share&utm_medium=member_desktop&rcm=ACoAABsu7EUBcb_a1-JHKS9RC0B5rf8Ye-5XM60) and one reflected in GitLab's mission and [Trust Center](https://trust.gitlab.com/).\n\n## What the policy change actually means\n\nGitHub's announcement also specifies that the data may be shared with GitHub affiliates, including Microsoft, for AI development purposes.\n\nA policy change of this nature forces organizations to re-examine their AI governance posture, audit their Copilot license tiers, and confirm that the right controls are configured across their teams.\n\n## Why AI governance matters in regulated environments\n\nSource code is often among an organization's most sensitive intellectual property. It may contain references to internal systems, reflect proprietary business logic, or touch data flows governed by strict retention and access policies. 
When that code passes through an AI assistant, questions about training data usage, model vendor relationships, and data residency become compliance concerns.\n\nThe exposure is particularly acute for financial services firms that have invested in proprietary algorithms: fraud detection logic, credit risk models, underwriting rules, and trading strategies. When AI tooling processes that code and uses it to train models serving competitors, vendor data practices become an IP concern.\n\nFinancial institutions operating under [the Federal Reserve's Supervisory Guidance on Model Risk Management (SR 11-7)](https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm) and the [Digital Operational Resilience Act (DORA)](https://eur-lex.europa.eu/eli/reg/2022/2554/oj/eng) are required to maintain documented, auditable oversight of third-party technology providers, including understanding how those providers handle data. Third-party AI tools used in development workflows increasingly fall within the scope of model risk oversight, and material changes to vendor data practices require updated documentation.\n\nIn the public sector, [the National Institute of Standards and Technology Special Publication 800-53 (NIST 800-53)](https://csrc.nist.gov/publications/detail/sp/800-53/rev-5/final) and the [Federal Information Security Modernization Act (FISMA)](https://www.cisa.gov/topics/cyber-threats-and-advisories/federal-information-security-modernization-act) establish that sensitive or classified code must never leave a controlled boundary. For U.S. Department of Defense and intelligence community environments in particular, a vendor's default data posture is an operational concern. In healthcare, [the Health Insurance Portability and Accountability Act (HIPAA)](https://www.hhs.gov/hipaa/index.html) governs how patient-adjacent data is handled by third parties, and development environments that touch clinical systems increasingly fall within that scope.\n\nAcross all of these contexts, the common thread is the same: A vendor policy that changes data usage defaults, requires individual opt-out, and offers different protections depending on account tier introduces exactly the kind of uncontrolled variable that compliance teams cannot afford.\n\n## What regulated industries actually need from AI vendors\n\nRegulated organizations have largely moved past debating whether to adopt AI in development workflows. The focus now is on doing so in a way they can defend to regulators, boards, and customers. That shift has surfaced a consistent set of requirements regardless of sector.\n\n**Contractual certainty.** Regulated firms need to know, with specificity, what happens to their data. A clear, documented, unconditional commitment is what's required, not something that varies by plan or requires action before a deadline.\n\n**Auditability.** Model risk management frameworks require organizations to understand and validate the AI systems they deploy, including the training data behind those models and the third parties involved in their development. Vendors who cannot answer these questions create documentation risk for the organizations relying on them.\n\n**Separation from vendor incentives.** When an AI vendor trains models on customer usage data, code and workflows become inputs to a system that also serves competitors.
For institutions with proprietary trading logic, underwriting models, or fraud detection systems, that's a genuine IP exposure.\n\n## GitLab's position on AI data governance\n\nGitLab does not use customer code to train AI models. This commitment applies at every tier, and AI vendors are contractually prohibited from using inputs or outputs associated with GitLab customers for their own purposes.\n\nThis is a deliberate architectural and policy choice, not a feature of a particular pricing tier. As GitLab's [post on enterprise independence](https://about.gitlab.com/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops/) notes, data governance has become \"an increasingly critical factor in enterprise technology decisions, driven by a complex web of national and regional data protection laws and growing concern about control over sensitive intellectual property.\"\n\nGitLab is also cloud-neutral and model-neutral, supports self-hosted deployments, and is not commercially tied to any single cloud provider or large language model (LLM). That [independence matters](https://about.gitlab.com/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops/) for regulated organizations evaluating vendor concentration risk. The [AI Continuity Plan](https://handbook.gitlab.com/handbook/product/ai/continuity-plan/) documents how vendor changes are managed, including material changes to how AI vendors treat customer data, a direct response to the governance requirements under frameworks like [DORA](https://handbook.gitlab.com/handbook/legal/dora/).\n\n## The governance gap AI teams need to close\n\nGitHub's policy update is a reminder that for organizations in regulated industries, understanding exactly how an AI tool handles data is a prerequisite for using it at all. That means asking vendors for clear, documented answers: Is our data used for model training? Who are your AI model subprocessors? What happens if a vendor changes its data practices? Can we deploy in a way that keeps all AI processing within our own infrastructure? What indemnification do you offer for AI-generated output?\n\nVendors who can answer those questions clearly, and document those answers in an auditable form, are vendors you can build on. **Those who cannot will create compliance debt every time they ship a policy update.** And when a vendor can change its data practices with 30 days' notice, that's not a partnership built for regulated industries. That's a liability.\n\n> Learn more about GitLab's approach to AI governance at the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/).",[704,785],{"featured":690,"template":833,"slug":899},"github-copilots-new-policy-for-ai-training-is-a-governance-wake-up-call",{"category":713,"slug":715,"posts":901},[902,914,926],{"content":903,"config":912},{"title":904,"description":905,"heroImage":906,"authors":907,"date":909,"body":910,"category":715,"tags":911},"curl removed from Omnibus-GitLab FIPS packages in 19.0","With the Omnibus-GitLab 19.0 release, GitLab will stop building curl for FIPS customers. Here’s what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776362275/ozbwn9tk0dditpnfddlz.png",[908],"Adam Chu","2026-04-24","Starting with Omnibus-GitLab 19.0 (and the subsequent patch release to existing supported versions), FIPS packages will no longer include a GitLab-built version of curl.
Instead, they will use the curl package provided by the customer’s Linux distribution, in the same way that FIPS packages already use the distribution's OpenSSL. \n\n## Why is this change happening?\n\nThis change is necessary because curl 8.18.0 deprecated compilation against OpenSSL 1.x, which prevents us from continuing our previous approach on Amazon Linux 2 and AlmaLinux 8 (affecting RHEL 8 customers). GitLab provides most dependencies for Omnibus-GitLab, but in FIPS packages we link to the distribution's cryptographic libraries rather than bundling our own — and we are now extending that model to curl. \n\nFor maintainability and security reasons, we are applying this change to all FIPS packages, including distributions with OpenSSL 3.0 or later. All FIPS customers are affected.  \n\n## What do I need to do?\n\n### GitLab Self-Managed\n\nGitLab 19.0 will be available starting on May 21, 2026.\n\n> Learn more about the [release schedule](https://about.gitlab.com/releases/).\n\nStarting with the 19.0 Omnibus-GitLab FIPS package, the bundled curl will be removed and replaced with the curl provided by the customer's Linux distribution. The customer's GitLab instance will continue to work as expected. This change has no other impact and doesn't require any immediate action.\n\n## Important implication\n\nGitLab will no longer be responsible for shipping security updates to curl specifically in FIPS packages, and it will be up to the customer to keep their own OS's curl up to date to receive fixes/security patches. Scanner findings for curl will now reflect the host OS package rather than a GitLab-bundled version. This is consistent with how OpenSSL is already handled in FIPS environments.\n\n## What do I do if I still have problems?\n\nIf you need assistance, please open an issue in the [omnibus-gitlab issue tracker](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/new?issue&issuable_template=Bug).\n",[785],{"featured":690,"template":833,"slug":913},"curl-removed-from-omnibus-gitlab-fips-packages-in-19-0",{"content":915,"config":924},{"title":916,"description":917,"authors":918,"heroImage":920,"date":921,"body":922,"category":715,"tags":923},"Claude Opus 4.7 is now available in GitLab Duo Agent Platform","Anthropic's latest model, available now, for stronger agent work. ",[919],"Rebecca Carter","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776174711/ksndibz6sgj1umx5cjsj.png","2026-04-16","\nThe [GitLab Duo Agent Platform](https://docs.gitlab.com/user/duo_agent_platform/) now supports [Claude Opus 4.7](https://www.anthropic.com/news/claude-opus-4-7), Anthropic's latest model, available today via model selection in [Agentic Chat](https://docs.gitlab.com/user/duo_agent_platform/context/#gitlab-duo-agentic-chat) and across agent-powered workflows in your GitLab instance.\n\n\nFor teams running agents across the full software delivery lifecycle, Opus 4.7 brings meaningful improvements to the tasks that matter most: the complex, multistep work that requires sustained reasoning, precise instruction following, and the ability to verify its own outputs before surfacing results.\n\n\n## Stronger reasoning across every agent workflow\n\n\nThe most significant gain is in how Opus 4.7 handles difficult, long-running work. GitLab's internal evaluations showed improved performance over both Sonnet 4.6 and Opus 4.6. 
Those gains translate directly to agents that work more efficiently across CI/CD pipelines, code review, vulnerability resolution, and other multi-tool workflows where compounding errors are costly.\n\n\nTeams with established agent workflows should note that Opus 4.7 interprets instructions more precisely than prior models, which means it executes more faithfully on complex, conditional tasks. For example, agents handling multistep remediation sequences complete each step as specified, giving teams more predictable, auditable outcomes.\n\n\n## Agents keep work moving from code to production\n\nThe promise of agents embedded across every stage of the software development lifecycle is that work stops waiting on people to move it forward. Opus 4.7 helps make that promise more reliable in practice.\n\n\nAt the code generation and test creation stage, agents benefit from Opus 4.7's ability to verify its own outputs before surfacing results. Less back-and-forth, faster iteration, fewer interruptions that pull developers out of flow. In security and vulnerability workflows, stronger instruction adherence means agents stay on task through multistep remediation sequences, completing the work as scoped rather than requiring course corrections along the way.\n\n\nIn CI/CD, where pipeline failures can become team-wide blockers, Opus 4.7's long-horizon consistency matters most. Agents investigating failures, analyzing logs, and proposing fixes work through that sequence coherently, without losing context mid-run. The work gets resolved rather than escalated.\n\nGitLab Duo Agent Platform connects these stages by design. Opus 4.7 strengthens the intelligence layer that runs across all of them, so agents coordinating across planning, development, security, and deployment have a more capable model driving decisions at every handoff.\n\n## Pricing and availability\n\nClaude Opus 4.7 is available now in GitLab Duo Agent Platform via [model selection](https://docs.gitlab.com/administration/gitlab_duo/model_selection/). For a full list of models available for Duo Agent Platform along with their respective credit consumption, please visit our [documentation](https://docs.gitlab.com/subscriptions/gitlab_credits/#models).\n\nYou can start a [free trial of GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo-agent-platform/) today. If you are already using GitLab in the free tier, [you can sign up](https://docs.gitlab.com/subscriptions/gitlab_credits/#for-the-free-tier-on-gitlabcom) for Duo Agent Platform by following a few simple steps.\n\nAnd if you are an existing subscriber to GitLab Premium or Ultimate, you can simply [turn on Duo Agent Platform](https://docs.gitlab.com/user/duo_agent_platform/turn_on_off/) and start using the GitLab Credits [that are included](https://docs.gitlab.com/subscriptions/gitlab_credits/#included-credits) with your subscription.\n\n\n*This blog post contains forward-looking statements within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. Although we believe that the expectations reflected in these statements are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to differ materially. Further information on these risks and other factors is included under the caption \"Risk Factors\" in our filings with the SEC.
We do not undertake any obligation to update or revise these statements after the date of this blog post, except as required by law.*\n",[704,785],{"featured":690,"template":833,"slug":925},"claude-opus-4-7-is-now-available-in-gitlab-duo-agent-platform",{"content":927,"config":936},{"title":928,"description":929,"authors":930,"heroImage":932,"date":933,"body":934,"category":715,"tags":935},"Passkeys now available for passwordless sign-in and 2FA on GitLab","Learn how to register a passkey to your account and how two-factor authentication works as a phishing-resistant method.",[931],"GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1772029801/qk75nu1eezxa6aiefpup.png","2026-02-25","Passkeys are now available on GitLab and offer a more secure and convenient way to access your account. You can use passkeys for passwordless sign-in or as a phishing-resistant two-factor authentication (2FA) method. Passkeys let you authenticate using your device's fingerprint, face recognition, or PIN. For accounts with 2FA enabled, passkeys automatically become available as your default 2FA method.\n\n\u003Cfigure class=\"video_container\"> \u003Ciframe src=\"https://www.youtube.com/embed/LN5MGRdTHR8?si=OOebJZzN3LkSmzNv\" title=\"Passwordless authentication using passkeys\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe> \u003C/figure>\n\n\u003Cbr>\u003C/br>\n\nTo register a passkey to your account, go to your profile settings and select **Account > Manage authentication**.\n\nPasskeys use WebAuthn technology and public-key cryptography: a key pair made up of a private key and a public key. Your private key stays securely on your device and never leaves it, while your public key is stored on GitLab. Even if GitLab were to become compromised, attackers could not use your stored credentials to access your account. Passkeys work across desktop browsers (Chrome, Firefox, Safari, Edge), mobile devices (iOS 16+, Android 9+), and FIDO2 hardware security keys, allowing you to register multiple passkeys across your devices for convenient access.\n\n![Passkeys sign-in with two-factor authentication](https://res.cloudinary.com/about-gitlab-com/image/upload/v1767807931/n652nkgvna1rsymlfzpi.png)\n\nGitLab signed the [CISA Secure by Design Pledge](https://about.gitlab.com/blog/last-year-we-signed-the-secure-by-design-pledge-heres-our-progress/), committing to improve our security posture and help customers develop secure software faster. One key objective of the pledge is to increase the use of [multi-factor authentication (MFA)](https://about.gitlab.com/blog/last-year-we-signed-the-secure-by-design-pledge-heres-our-progress/#multi-factor-authentication-mfa) across the manufacturer’s products.
Passkeys are an integral part of this goal, and provide a seamless, phishing-resistant MFA method that makes signing in to GitLab both more secure and more convenient.\n\nIf you have questions, want to share your experience, or would like to engage directly with our team about potential improvements, see the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/work_items/366758).\n",[796,785],{"featured":690,"template":833,"slug":937},"passkeys-now-available-for-passwordless-sign-in-and-2fa-on-gitlab",{"category":725,"slug":727,"posts":939},[940,955,966],{"content":941,"config":953},{"title":942,"description":943,"authors":944,"heroImage":946,"date":947,"body":948,"category":727,"tags":949},"The Co-Create Program: How customers are collaborating to build GitLab","Learn how organizations like Thales, Scania, and Kitware are partnering with GitLab engineers to contribute meaningful features that benefit the entire community.",[945],"Fatima Sarah Khalid","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","2025-01-30","This past year, over 800 community members have made more than 3,000 contributions to GitLab. These contributors include team members from global organizations like Thales, Scania, and Kitware, who are helping shape GitLab's future through the [Co-Create Program](https://about.gitlab.com/community/co-create/) — GitLab's collaborative development program where customers work directly with GitLab engineers to contribute meaningful features to the platform.\n\nThrough workshops, pair programming sessions, and ongoing support, program participants get hands-on experience with GitLab's architecture and codebase while solving issues or improving existing features.\n\n\"Our experience with the Co-Create Program has been incredible,\" explains Sébastien Lejeune, open source advocate at Thales. \"It only took two months between discussing our contribution with a GitLab Contributor Success Engineer and getting it live in the GitLab release.\"\n\nIn this post, we'll explore how customers have leveraged the Co-Create Program to turn their ideas into code, learning and contributing along the way.\n\n## The Co-Create experience\n[The GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) helps contributors get started developing on GitLab. \"The advice I would give new contributors is to remember that you can't break anything with the GDK,\" says Hook. \"If you make a change and it doesn't work, you can undo it or start again. The beauty of GDK is that you can tinker, test, and learn without worrying about the environment.\"\n\nEach participating organization in the Co-Create Program receives support throughout their contribution journey:\n\n- __Technical onboarding workshop__: A dedicated session to set up the GitLab Development Kit (GDK) and understand GitLab's architecture\n- __1:1 engineering support__: Access to GitLab engineers for pair programming and technical guidance\n- __Architecture deep dives__: Focused sessions on specific GitLab components relevant to the issue the organization is contributing to\n- __Code review support__: Detailed feedback and guidance through the merge request process\n- __Regular check-ins__: Ongoing collaboration to ensure progress and address any challenges\n\nThis structure ensures that teams can contribute effectively, regardless of their prior experience with GitLab's codebase or the Ruby/Go programming language. 
As John Parent from Kitware notes, \"If you've never seen or worked with GitLab before, you're staring at a sophisticated architecture and so much code across different projects. The Co-Create Program helps distill what would take weeks of internal training into a targeted crash course.\"\n\nThe result is a program that not only helps deliver new features but also builds lasting relationships between GitLab and its user community. \"It's inspiring for our engineers to see the passion our customers bring to contributing to and building GitLab together,\" shares Shekhar Patnaik, principal engineer at GitLab. \"Customers get to see the 'GitLab way,' and engineers get to witness their commitment to shaping the future of GitLab.\"\n\n## Enhancing project UX with Thales\nWhen Thales identified opportunities to improve GitLab's empty project UI, they didn't just file a feature request — they built the solution themselves. Their contributions focused on streamlining the new project setup experience by simplifying SSH/HTTPS configuration with a tabbed interface and adding copy/paste functionality for the code snippets. These changes had a significant impact on developer workflows.\n\nThe team's impact extended beyond the UX improvements. Quentin Michaud, PhD fellow for cloud applications on the edge at Thales, contributed to improving the GitLab Development Kit (GDK). As a package maintainer for Arch Linux, Michaud's expertise helped improve GDK's documentation and support its containerization efforts, making it easier for future contributors to get started.\n\n\"My open source experience helped me troubleshoot GDK's support for Linux distros,\" says Michaud. \"While improving package versioning documentation, I saw that GitLab's Contributor Success team was also working to set up GDK into a container. Seeing our efforts converge was a great moment for me — it showed how open source collaboration can help build better solutions.\"\n\nThe positive experience for the Thales team means that Lejeune now uses the Co-Create Program as \"a powerful example to show our managers the return on investment from open source contributions.\"\n\n## Advancing package support with Scania\nWhen Scania needed advanced package support in GitLab, they saw an opportunity to contribute and build it themselves.\n\n\"As long-time GitLab users who actively promote open source within our organization, the Co-Create Program gave us a meaningful way to contribute directly to open source,\" shares Puttaraju Venugopal Hassan, solution architect at Scania.\n\nThe team started with smaller changes to familiarize themselves with the codebase and review process, then progressed to larger features. \"One of the most rewarding aspects of the Co-Create Program has been looking back at the full, end-to-end process and seeing how far we've come,\" reflects Océane Legrand, software developer at Scania. \"We started with discovery and smaller changes, but we took on larger tasks over time. It's great to see that progression.\"\n\nTheir contributions include bug fixes for the package registry and efforts to enhance the Conan package registry feature set, bringing it closer to general availability (GA) readiness while implementing Conan version 2 support. Their work and collaboration with GitLab demonstrate how the Co-Create Program can drive significant improvements to GitLab's package registry capabilities.\n\n\"From the start, our experience with the Co-Create Program was very organized.
We had training sessions that guided us through everything we needed to contribute. One-on-one sessions with a GitLab engineer also gave us an in-depth look at GitLab's package architecture, which made the contribution process much smoother,\" said Juan Pablo Gonzalez, software developer at Scania.\n\nThe impact of the program goes beyond code — program participants are also building valuable skills as a direct result of their contributions. In [the GitLab 17.8 release](https://docs.gitlab.com/releases/17/gitlab-17-8-released/#mvp), both Legrand and Gonzalez were recognized as GitLab MVPs. Legrand talked about how the work she's doing in open source impacts both GitLab and Scania, including building new skills for her and her team: \"Contributing through the Co-Create Program has given me new skills, like experience with Ruby and background migrations. When my team at Scania faced an issue during an upgrade, I was able to help troubleshoot because I'd already encountered it through the Co-Create Program.\"\n\n## Optimizing authentication for high-performance computing with Kitware\nKitware brought specialized expertise from their work with national laboratories to improve GitLab's authentication framework. Their contributions included adding support for the OAuth2 device authorization grant flow in GitLab, as well as implementing new database tables, controllers, views, and documentation. This contribution enhances GitLab's authentication options, making it more versatile for devices without browsers or with limited input capabilities.\n\n\"The Co-Create Program is the most efficient and effective way to contribute to GitLab as an external contributor,\" shares John Parent, R&D engineer at Kitware. \"Through developer pairing sessions, we found better implementations that we might have missed working alone.\"\n\nAs a long-time open source contributor, Kitware particularly appreciated GitLab's approach to development. \"I assumed GitLab wouldn't rely on out-of-the-box solutions at its scale, but seeing them incorporate a Ruby dependency instead of building a custom in-house solution was great,\" says Parent. \"Coming from the C++ world, where package managers are rare, it was refreshing to see this approach and how straightforward it could be.\"\n\n## Building better together: Benefits of Co-Create\nThe Co-Create Program creates value that flows both ways. \"The program bridges a gap between us as GitLab engineers and our customers,\" explains Imre Farkas, staff backend engineer at GitLab. \"As we work with them, we hear their day-to-day challenges, the parts of GitLab they rely on, and where improvements can be made. It's great to see how enthusiastic they are about getting involved in building GitLab with us.\"\n\nThis collaborative approach also accelerates GitLab's development. As Patnaik observes: \"Through Co-Create, our customers are helping us move our roadmap forward. Their contributions allow us to deliver critical features faster, benefiting our entire user base. As the program scales, there's a real potential to accelerate development on our most impactful features by working alongside the very people who rely on them.\"\n\n## Get started with Co-Create\nReady to turn your feature requests into reality?
Whether you're looking to enhance GitLab's UI like Thales, improve package support like Scania, or optimize authentication like Kitware, the Co-Create Program welcomes organizations who want to actively shape GitLab's future while building valuable open source experience.\n\nContact your GitLab representative to learn more about participating in the Co-Create Program, or visit our [Co-Create page](https://about.gitlab.com/community/co-create/) for more information.\n",[950,951,952],"contributors","open source","customers",{"slug":954,"featured":13,"template":833},"the-co-create-program-how-customers-are-collaborating-to-build-gitlab",{"content":956,"config":964},{"title":957,"description":958,"authors":959,"heroImage":946,"date":961,"body":962,"category":727,"tags":963},"Kingfisher transforming the developer experience with GitLab","Learn how the international company focuses on DevSecOps, including automation, to reduce complexity in workflows for better efficiency.",[960],"Sharon Gaudin","2024-11-12","Kingfisher plc, an international home improvement company, has leaned into GitLab’s end-to-end platform to help it build a DevSecOps foundation that is revolutionizing its developer experience. And the company plans to continue that improvement by increasing its use of platform features, focusing on security, simplifying its toolchain, and increasing the use of automation.\n\n> \u003Cimg align=\"left\" width=\"200\" height=\"200\" hspace=\"5\" vspace=\"5\" alt=\"Chintan Parmar\" src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176076/Blog/ro7u8p695zw9fllbk4j5.png\" style=\"float: left; margin-right: 25px;\"> “The whole point of this is to reduce friction for our engineers, taking away a lot of the complexity in their workflow, and bringing in best practices and governance,” says Chintan Parmar, site reliability engineering manager at Kingfisher. “In terms of what we've done and what we're doing at the moment, it really is about building a foundation in terms of CI/CD and changing the way we deploy to bring in consistency and improve the developer experience.”\n\nParmar talked about his team and their efforts during the [GitLab DevSecOps World Tour event](https://about.gitlab.com/events/epic-conference/) in London last month. In an on-stage interview with Sherrod Patching, vice president of Customer Success Management at GitLab, he laid out Kingfisher’s journey with the platform, which is enabling its teams, while also making it easier and faster to move software updates and new projects from ideation to deployment.\n\n[Kingfisher](https://www.kingfisher.com/en/index.html) is a parent company with more than 2,000 stores in eight countries across Europe. Listed on the London Stock Exchange and part of the Financial Times Stock Exchange (FTSE) 100 Index, the group reported £13 billion in total revenue in FY 2023/24. Its brands include B&Q, Screwfix, Castorama, and Brico Depot.\n\nThe company first adopted GitLab in 2016, using a free starter license, and then moved to Premium in 2020. In that time, it also has moved from on-premise to a cloud environment, started using shared GitLab runners and source code management, and began building out a CI/CD library that gives team members easy access to standardized and reusable components for typical pipeline stages, such as build, deploy, and test.\n\n## Tracking metrics that execs care about\n\nKingfisher also is tracking metrics, like deployment frequency, lead time to change, and change failure rates, with GitLab. 
And teams are analyzing value streams, mapping workflows, and finding bottlenecks. All of those metrics are being translated into data that company leaders can sink their teeth into.\n\n“Execs may not care about whether a merge request has been waiting 15 or 20 minutes, but they do care about how we translate that time value into dollars or pounds,” says Parmar, who used GitLab when he previously worked at [Dunelm Group, plc,](https://about.gitlab.com/customers/dunelm/) another major UK-based retailer. “Kingfisher is a very data-driven organization. We are looking to overlay these metrics to see where we can continue to improve our developer experience, eliminating slowdowns and manual tasks, while increasing automation.”\n\nWhile on-stage, Parmar made it clear that all the changes being made are aimed at improving software development and deployment. However, the work is equally about making team members’ jobs easier, giving them more time and autonomy to do the kind of work they enjoy, instead of what can seem like a never-ending stream of repetitive, manual tasks. He noted that the team is so focused on easing workflows and giving engineers more time to be innovative that it has created a “developer experience squad.”\n\n## Putting people first while laying out priorities\n\nSo what’s coming next for Kingfisher and its engineering squads, which have about 600 practitioners?\n\nAccording to Parmar, Kingfisher already has its priorities mapped out. Using GitLab to [move security left](https://about.gitlab.com/solutions/application-security-testing/) is at the top of their list. The group also is focused on continuing to reduce its toolchain, and using automation to increase productivity. And he expects that early in 2025, teams will begin “dabbling” with the artificial intelligence capabilities in [GitLab Duo](https://about.gitlab.com/gitlab-duo-agent-platform/), a suite of AI-powered features in the platform that help increase velocity and solve key pain points across the software development lifecycle. Kingfisher will focus on how that can further increase its efficiency and productivity.\n\nTo get all of this done, Parmar says the first step is to ensure that people come first.\n\n“We’re focused on the hearts and minds of our people... and remembering that people can be attached to how they work through pipelines,” he adds. “People have different ways of building their pipelines. We need to understand what they need, what their workflows look like, and then work with them to find the right solution. After, we’ll go back to them with data that shows the improvements worked. So instead of telling them what they need, we find out what that is, and fix what’s slowing them down. That builds a very good rapport with our engineers.”\n\nChanging how a team creates and deploys software is a journey.
Parmar suggests that collaboratively taking developers and security teams on that journey, instead of dragging them along, makes a big difference in how smoothly the migration goes and in team members’ overall experience.\n\n> Learn [how other GitLab customers use the DevSecOps platform](https://about.gitlab.com/customers/) to gain results for their customers.\n",[952,516,548,858],{"slug":965,"featured":13,"template":833},"kingfisher-transforming-the-developer-experience-with-gitlab",{"content":967,"config":977},{"title":968,"description":969,"authors":970,"heroImage":972,"date":973,"body":974,"category":727,"tags":975},"How Indeed transformed its CI platform with GitLab","The world's #1 job site migrated thousands of projects to GitLab CI, boosting productivity and cutting costs. Learn the benefits they realized, including a 79% increase in daily pipelines.",[971],"Carl Myers","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099351/Blog/Hero%20Images/Blog/Hero%20Images/Indeed-blog-cover-image-2_4AgA1DkWLtHwBlFGvMffbC_1750099350771.png","2024-08-27","***Editor's note: From time to time, we invite members of our customer community to contribute to the GitLab Blog. Thanks to Carl Myers, Manager of CI Platforms at Indeed, for sharing your experience with GitLab.***\n\nHere at Indeed, our mission is to help people get jobs. Indeed is the [#1 job site](https://www.indeed.com/about?isid=press_us&ikw=press_us_press%2Freleases%2Faward-winning-actress-viola-davis-to-keynote-indeed-futureworks-2023_textlink_https%3A%2F%2Fwww.indeed.com%2Fabout) in the world with more than 350 million unique visitors every month.\n\nFor Indeed's Engineering Platform teams, we have a slightly different motto: \"We help people to help people get jobs.\" As part of a data-driven engineering culture that has spent the better part of two decades always putting the job seeker first, we are responsible for building the tools that not only make this possible, but empower engineers to deliver positive outcomes to job seekers every day.\n\nGitLab Continuous Integration has allowed Indeed’s CI Platform team of just 11 people to effectively support thousands of users across the company. Other benefits Indeed has realized by moving to GitLab CI include:\n- 79% increase in daily pipelines\n- 10-20% lower CI hardware costs\n- Decreased support burden\n\n## Evolving our CI platform: From Jenkins to a scalable solution\n\nLike many large technology companies, we built our CI platform organically as the company scaled, using the de facto open source and industry standard solutions available at the time. Back in 2007, when Indeed had fewer than 20 engineers, we were using Hudson, Jenkins’ direct predecessor.\n\nToday, through nearly two decades of growth, we have thousands of engineers. As new technology became available, we made incremental improvements, switching to Jenkins around 2011. Another improvement allowed us to move most of our workloads to dynamic cloud worker nodes using [AWS EC2](https://aws.amazon.com/ec2/). As we entered the Kubernetes age, however, the system architecture reached its limits.\n\nJenkins’ architecture was not created with the cloud in mind. Jenkins operates by having a \"controller\" node, a single point of failure that runs critical parts of a pipeline and farms out certain steps to worker nodes (which can scale horizontally to some extent). Controllers are also a manual scaling axis.\n\nIf you have too many jobs to fit on one controller, you must partition your jobs across controllers manually.
CloudBees offers ways to mitigate this, including the CloudBees Jenkins Operations Center, which allows you to manage your constellation of controllers from a single centralized place. However, controllers remain challenging to run in a Kubernetes environment because each controller is a fragile single point of failure. Activities like node rollouts or hardware failures cause downtime.\n\nIn addition to the technical limitations baked into Jenkins itself, our CI platform also had several problems of our own making. For example, we used the Groovy Jenkins DSL to generate jobs from code in each repository. This led to each project having its own copy-pasted job pipeline, resulting in hundreds of versions that were hard to maintain and update. While Indeed’s engineering culture values flexibility and allows teams to operate in separate repositories, this flexibility became a burden as teams spent too much time addressing regular maintenance requests.\n\nRecognizing our technical debt, we turned to the [Golden Path pattern](https://tag-app-delivery.cncf.io/whitepapers/platforms/), which allows flexibility while providing a default route to simplify updates and encourage consistent practices across projects.\n\nThe CI Platform team at Indeed is not very large. Our team of around 11 engineers supports thousands of users, fielding support requests, performing upgrades and maintenance, and enabling always-on support for our global company.\n\nBecause our team not only supports our GitLab instance but also the entire CI platform, including the artifact server, our shared build code, and multiple other custom components of our platform, we had our work cut out for us. We needed a plan that would help us address our challenges while making the most efficient use of our existing resources.\n\n## Moving to GitLab CI\n\nAfter a careful design review with key stakeholders, we decided to migrate the entire company from Jenkins to GitLab CI. The primary reasons for choosing GitLab CI were:\n- We were already using GitLab for source code management.\n- GitLab is a complete offering that provides everything we need for CI.\n- GitLab CI is designed for scalability and the cloud.\n- GitLab CI enables us to write templates that extend other templates, which is compatible with our golden path strategy.\n- GitLab is open source software and the GitLab team has always been supportive in helping us submit fixes, giving us extra flexibility and reassurance.\n\nBy the time we officially announced that the GitLab CI Platform would be generally available to users, we already had 23% of all builds happening in GitLab CI from a combination of grassroots efforts and early adopters.\n\nThe challenge of the migration, however, would be the long tail. Due to the number of custom builds in Jenkins, an automated migration tool would not work for the majority of teams. Most of the benefits of the new system would not come until the old system was at 0%. Only then could we turn off the hardware and save the CloudBees license fee.\n\n## Feature parity and the benefits of starting over\n\nThough we support many different technologies at Indeed, the three most common languages are Java, Python, and JavaScript. These language stacks are used to make libraries, deployables (web services or applications), and cron jobs (a process that runs at regular intervals, for example, to build a data set in our data lake). Each of these formed a matrix of project types (Java Library, Python Cronjob, JavaScript Webapp, etc.) 
for which we had a skeleton in Jenkins. Therefore, we had to produce a golden path template in GitLab CI for each of these project types.\n\nMost users could use these recommended paths without change, but for those who did require customization, the golden path would still be a valuable starting point and enable them to change only what they needed, while still benefiting from centralized template updates in the future.\n\nWe quickly realized that most users, even those with customizations, were happy to take the golden path and at least try it. If they missed their customizations, they could always add them later. This was a surprising result! We thought that teams who had invested in significant customizations would be loath to give them up, but in the majority of cases teams just didn't care about them anymore. This allowed us to migrate many projects very quickly — we could just drop the golden path (a small file, about six lines of includes) into their project, and they could take it from there.\n\n## InnerSource to the rescue\n\nThe CI Platform team also adopted a policy of \"external contributions first\" to encourage everyone in the company to participate. This is sometimes called InnerSource. We wrote tests and documentation to enable external contributions — contributions from outside our immediate team — so teams that wanted to write customizations could instead include them in the golden path behind a feature flag. This let them share their work with others and ensure we didn't break them moving forward (because they became part of our codebase, not theirs).\n\nThis also meant that teams blocked waiting on a feature they needed were empowered to work on it themselves. We could say \"we plan to implement the feature in a few weeks, but if you need it earlier than that we are happy to accept a contribution.\" In the end, many core features necessary for parity were developed in this manner, more quickly and better than our team alone had the resources to deliver. The migration would not have been a success without this model.\n\n## Ahead of schedule and under budget\n\nOur CloudBees license expired on April 1, 2024. This gave us an aggressive target to achieve the full migration. This was particularly ambitious considering that at the time, 80% of all builds (60% of all projects) still used Jenkins for their CI. This meant over 2,000 [Jenkinsfiles](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) would still need to be rewritten or replaced with our golden path templates.\n\nTo achieve this target, we made documentation and examples available, implemented features where possible, and helped our users contribute features where they were able.\n\nWe started regular office hours, where anyone could come and ask questions or seek our help to migrate. We additionally prioritized support questions relating to migration ahead of almost everything else. Our team became GitLab CI experts and shared that expertise inside our team and across the organization.\n\nAutomatic migration for most projects was not possible, but we discovered it could work for a small subset of projects where customization was rare. We created a Sourcegraph batch change campaign to submit merge requests to migrate hundreds of projects, and poked and prodded our users to accept these MRs.\n\nWe took success stories from our users and shared them widely.
As users contributed new features to our golden paths, we advertised that these features \"came free\" when you migrated to GitLab CI. Some examples included built-in security and compliance scanning, Slack notifications for CI builds, and integrations with other internal systems.\n\nWe also conducted a campaign of aggressive \"scream tests.\" We automatically disabled Jenkins jobs that hadn't run or succeeded in a while, and told users that if they needed them, they could turn them back on. This was a low-friction way to identify which jobs were actually needed. We had thousands of jobs that hadn't been run a single time since our last CI migration (which was Jenkins to Jenkins). This told us we could safely ignore almost all of them.\n\nIn January 2024, we nudged our users by announcing that all Jenkins controllers would become read-only (no builds) unless an exception was explicitly requested. We had much better ownership information for controllers and they generally aligned with our organization's structure, so it made sense to focus on controllers rather than jobs. The list of controllers was also much more manageable than the list of jobs.\n\nTo obtain an exception, we asked our users to find their controllers in a spreadsheet and put their contact information next to each one. This enabled us to get a guaranteed up-to-date list of stakeholders we could follow up with as we sprinted to the finish line, but also enabled users to clearly let us know which jobs they absolutely needed. At peak, we had about 400 controllers; by January we had 220, but only 54 controllers required exceptions (several of them owned by us, to run our tests and canaries).\n\n![Indeed - Jenkins Controller Count graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099357/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099357392.png)\n\nWe had a manageable list of around 50 teams, which we divided among our team members, and started doing outreach to understand how each team was progressing with the migration. We spent January and February discovering that some teams planned to finish their migration without our help before February 28, that others were planning to deprecate their projects before then, and that a very small number were very worried they wouldn't make it.\n\nWe were able to work with this smaller set of teams and provide them with “white-glove” service. We explained that, while we lacked the expertise necessary to do the migration for them, we could partner with a subject matter expert from their team. For some projects, we wrote and they reviewed; for others, they wrote and we reviewed. In the end, all of our work paid off and we turned off Jenkins on the very day we had announced 8 months earlier.\n\n## The results: Enhanced CI efficiency and user satisfaction\n\nAt its peak, our Jenkins CI platform ran over 14,000 pipelines per day and serviced our thousands of projects. Today, our GitLab CI platform has run over 40,000 pipelines in a single day and regularly runs over 25,000 per day. The incremental cost of each job in each pipeline is similar to Jenkins, but without the overhead of hardware to run the controllers. Additionally, these controllers served as single points of failure and scaling limiters that forced us to artificially divide our platform into segments. While an apples-to-apples comparison is difficult, we find that with this overhead gone our CI hardware costs are 10-20% lower.
Additionally, the support burden of GitLab CI is lower since the application automatically scales in the cloud and has cross-availability-zone resiliency, and the templating language has excellent public documentation.\n\nA benefit just as important, if not more so, is that now we are at over 70% adoption of our golden paths. This means that we can roll out an improvement and over 5,000 projects at Indeed will benefit immediately with no action required on their part. This has enabled us to move some jobs to more cost-effective ARM64 instances, keep users' build images updated more easily, and better manage other cost-saving opportunities. Most importantly, our users are happier with the new platform.\n\n__About the author:__\n*Carl Myers lives in Sacramento, CA, and is the manager of the CI Platform team at Indeed. Carl has spent his nearly two-decade career dedicated to building internal tools and developer platforms that delight and empower engineers at companies large and small.*\n\n**Acknowledgements:**\n*This migration would not have been possible without the tireless efforts of Tron Nedelea, Eddie Huang, Vivek Nynaru, Carlos Gonzalez, Lane Van Elderen, and the rest of the CI Platform team. The team also especially appreciates the leadership of Deepak Bitragunta and Irina Tyree for helping secure buy-in, resources, and company-wide alignment throughout this long project. Finally, our thanks go out to everyone across Indeed who contributed code, feedback, and bug reports, and helped migrate projects.*\n\n**This is an edited version of the article [How Indeed Replaced Its CI Platform with Gitlab CI](https://engineering.indeedblog.com/blog/2024/08/indeed-gitlab-ci-migration/), originally published on the Indeed engineering blog.**",[952,89,976,516],"user stories",{"slug":978,"featured":13,"template":833},"how-indeed-transformed-its-ci-platform-with-gitlab",{"category":548,"slug":551,"posts":980},[981,993,1006],{"content":982,"config":991},{"title":983,"description":984,"authors":985,"heroImage":987,"date":988,"body":989,"category":551,"tags":990},"Teaching software development the easy way using GitLab","Learn how University of Washington lecturer Stephen G. Dame uses GitLab for Education to manage student assignments, distribute course materials, and provide inline code feedback at scale.\n",[986],"Rod Burns","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659537/Blog/Hero%20Images/display-article-image-0679-1800x945-fy26.png","2026-04-29","For instructors teaching software development, one of the biggest logistical challenges is assignment distribution and feedback at scale. How do you give large groups of students access to course materials, keep solution code private, and still deliver meaningful, contextual feedback without lots of administrative overhead?\n\nThe **[GitLab for Education program](https://about.gitlab.com/solutions/education/)** provides qualifying institutions with free access to **GitLab Ultimate**, enabling instructors to build professional-grade workflows that mirror real-world software development environments. In this article, you'll learn how Stephen G.
Dame, a lecturer in the Computing and Software Systems department at the University of Washington, Bothell, uses simple workflows in GitLab to manage everything from course materials to student feedback across multiple classes.\n\n## From aerospace to academia: Bringing GitLab to the classroom\n\nDame came to academia with years of experience as a chief software engineer at Boeing Commercial Airplanes, where GitLab was used for aerospace projects. As an adjunct professor, he became an early advocate for GitLab within the university, joining the GitLab for Education program to access the full feature set needed to run structured, scalable course workflows.\n\n> **\"GitLab provides the greatest way to organize multiple classes, student assignments, lectures, and code samples through the use of Groups and Subgroups, which I found to be unique to GitLab compared to other repository platforms.\"**\n>\n> - Stephen G. Dame, University of Washington, Bothell\n\n## Set up groups: Build the right structure before writing a line of code\n\nThe foundation of an effective GitLab-based course is a well-planned group hierarchy. GitLab's **[Groups and Subgroups](https://docs.gitlab.com/tutorials/manage_user/#create-the-organization-parent-group-and-subgroups)** allow instructors to model the natural structure of a university department (institution, course, and role) with precise, inheritable permissions at every level.\n\nDame's structure places the university at the root (`UWTeaching`), with each course occupying its own subgroup (e.g., `css430`). Within each course sit repositories for `lecture-materials` and `code`, alongside dedicated subgroups for `students` and `graders`. Instructor materials remain private, while student and grader subgroups are configured with controlled permissions so that assignment briefs and solutions are visible only to the right people.\n\n![Screenshot of GitLab group hierarchy — institution, course subgroup, and per-student subgroups](https://res.cloudinary.com/about-gitlab-com/image/upload/v1777463673/dpxfnitv76pdmvcqtgag.png)\n\nPermissions cascade downward through the hierarchy via **Manage > Members**, allowing Dame to add students to a course's `students` subgroup with `Reporter` access and an expiration date tied to the end of the academic quarter. Students can clone and pull from assignment repositories but cannot push — keeping solution code firmly under instructor control.\n\nStudents are guided to set up SSH keys across all their working environments (local machines, cloud shells, virtual machines) so they can clone repositories and receive weekly updates via `git pull`. They copy relevant code into their own private repositories to manage their own version history.\n\n**Tip for large classes:** For larger cohorts, adding students by hand is impractical. GitLab's REST API lets you automate subgroup creation and membership from a list of usernames. 
Below is a sample Python script that handles this:\n\n```python\nimport gitlab\nfrom datetime import datetime\n\n# Connect to your GitLab instance\ngl = gitlab.Gitlab('https://gitlab.com', private_token='YOUR_PRIVATE_TOKEN')\n\n# Target parent group ID (e.g., the ID for \"css430 > students\")\nparent_group_id = 12345678\n\n# Set expiration: typically the beginning of the next month after quarter end\nexpiry_date = '2025-01-01'\n\n# List of collected student usernames\nstudent_list = ['alice_css430', 'bob_css430', 'carol_css430', 'dave_css430', 'eve_css430']\n\nfor username in student_list:\n    try:\n        # 1. Create a personal subgroup for the student\n        subgroup = gl.groups.create({\n            'name': username,\n            'path': username,\n            'parent_id': parent_group_id,\n            'visibility': 'private'\n        })\n\n        # 2. Add student to the new subgroup with Expiration\n        user = gl.users.list(username=username)[0]\n        subgroup.members.create({\n            'user_id': user.id,\n            'access_level': gitlab.const.REPORTER_ACCESS,\n            'expires_at': expiry_date\n        })\n        print(f\"Success: Subgroup created and student added for {username}\")\n    except Exception as e:\n        print(f\"Error processing {username}: {e}\")\n```\n\nThere is also an [open source project that automates class management](https://gitlab.com/edu-docs/class-management-automation) published by GitLab that provides additional tooling for this workflow.\n\n## Give feedback where the work actually lives\n\nOnce the structure is in place, the feedback workflow is where GitLab's value becomes most apparent to students. Dame asks students to submit assignments by opening a **[merge request](https://docs.gitlab.com/user/project/merge_requests/)** in their repository. This gives instructors an immediate, clean diff of everything the student has written.\n\n![A GitLab merge request showing inline code comment function for an instructor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1777467468/icclzyglbkwlvfysggbi.png)\n\nInstructors can click any line of code and leave an **inline comment** — not just flagging what is wrong, but explaining why, and pointing to what to look at next. Students receive this feedback in direct context with their code, which is far more actionable than a comment at the bottom of a submitted document.\n\n## Join GitLab for Education\n\nSetting up your first GitLab assignment takes some initial effort, but once the structure is in place it largely runs itself. The real payoff goes beyond organization: Students graduate having worked daily in an environment that mirrors professional software development, building habits around [version control](https://about.gitlab.com/topics/version-control/) and [code review](https://docs.gitlab.com/development/code_review/) rather than learning them as abstract concepts.\n\nIf you are just getting started, keep it simple. Begin with a single course group, one assignment template, and a basic pipeline. 
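\n\nFor example, a first assignment pipeline can be as small as a single job that runs on every push. The job name, image, and command below are illustrative placeholders; swap them for whatever build and test tooling your course uses:\n\n```yaml\n# .gitlab-ci.yml: a minimal starter pipeline for an assignment repository\nstages:\n  - test\n\nrun-assignment-tests:\n  stage: test\n  image: python:3.13  # placeholder; use the toolchain your course requires\n  script:\n    - echo \"Compile the starter code and run the provided tests here\"\n```\n\n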
The structure will grow naturally alongside your confidence with the platform.\n\nMake sure to **[sign up for GitLab for Education](https://about.gitlab.com/solutions/education/join/)** so that you and your students can access all top-tier features, including unlimited reviewers on merge requests, additional compute minutes, and expanded storage.\n\n> [Apply to the GitLab for Education program today](https://about.gitlab.com/solutions/education/join/).",[605,951],{"featured":690,"template":833,"slug":992},"teaching-software-development-the-easy-way-using-gitlab",{"content":994,"config":1004},{"description":995,"authors":996,"heroImage":998,"date":999,"title":1000,"body":1001,"category":551,"tags":1002},"AI-generated code is 34% of development work. Discover how to balance productivity gains with quality, reliability, and security.",[997],"Manav Khurana","https://res.cloudinary.com/about-gitlab-com/image/upload/v1767982271/e9ogyosmuummq7j65zqg.png","2026-01-08","AI is reshaping DevSecOps: Attend GitLab Transcend to see what’s next","AI promises a step change in innovation velocity, but most software teams are hitting a wall. According to our latest [Global DevSecOps Report](https://about.gitlab.com/developer-survey/), AI-generated code now accounts for 34% of all development work. Yet 70% of DevSecOps professionals report that AI is making compliance management more difficult, and 76% say agentic AI will create unprecedented security challenges.\n\nThis is the AI paradox: AI accelerates coding, but software delivery slows down as teams struggle to test, secure, and deploy all that code.\n\n## Productivity gains meet workflow bottlenecks\nThe problem isn't AI itself. It's how software gets built today. The traditional DevSecOps lifecycle contains hundreds of small tasks that developers must navigate manually: updating tickets, running tests, requesting reviews, waiting for approvals, fixing merge conflicts, addressing security findings. These tasks drain an average of seven hours per week from every team member, according to our research.\n\nDevelopment teams are producing code faster than ever, but that code still crawls through fragmented toolchains, manual handoffs, and disconnected processes. In fact, 60% of DevSecOps teams use more than five tools for software development overall, and 49% use more than five AI tools. This fragmentation creates collaboration barriers, with 94% of DevSecOps professionals experiencing factors that limit collaboration in the software development lifecycle.\n\nThe answer isn't more tools. It's intelligent orchestration that brings software teams and their AI agents together across projects and release cycles, with enterprise-grade security, governance, and compliance built in.\n\n## Seeking deeper human-AI partnerships\nDevSecOps professionals don't want AI to take over — they want reliable partnerships. The vast majority (82%) say using agentic AI would increase their job satisfaction, and 43% envision an ideal future with a 50/50 split between human and AI contributions. They're ready to trust AI with 37% of their daily tasks without human review, particularly for documentation, test writing, and code reviews.\n\nWhat we heard resoundingly from DevSecOps professionals is that AI won't replace them; rather, it will fundamentally reshape their roles. 83% of DevSecOps professionals believe AI will significantly change their work within five years, and notably, 76% think this will create more engineering jobs, not fewer. 
As coding becomes easier with AI, engineers who can architect systems, ensure quality, and apply business context will be in high demand.\n\nCritically, 88% agree there are essential human qualities that AI will never fully replace, including creativity, innovation, collaboration, and strategic vision.\n\nSo how can organizations bridge the gap between AI’s promise and the reality of fragmented workflows?\n\n## Join us at GitLab Transcend: Explore how to drive real value with agentic AI\nOn February 10, 2026, GitLab will be hosting Transcend, where we'll reveal how intelligent orchestration transforms AI-powered software development. You'll get a first look at GitLab's upcoming product roadmap and learn how teams are solving real-world challenges by modernizing development workflows with AI.\n\nOrganizations winning in this new era balance AI adoption with security, compliance, and platform consolidation. AI offers genuine productivity gains when implemented thoughtfully — not by replacing human developers, but by freeing DevSecOps professionals to focus on strategic thinking and creative innovation.\n\n[Register for Transcend today](https://about.gitlab.com/events/transcend/virtual/) to secure your spot and discover how intelligent orchestration can help your software teams stay in flow.",[704,1003,796],"DevOps platform",{"featured":13,"template":833,"slug":1005},"ai-is-reshaping-devsecops-attend-gitlab-transcend-to-see-whats-next",{"content":1007,"config":1015},{"title":1008,"description":1009,"authors":1010,"heroImage":839,"date":1012,"body":1013,"category":551,"tags":1014},"Atlassian ending Data Center as GitLab maintains deployment choice","As Atlassian transitions Data Center customers to cloud-only, GitLab presents a menu of deployment choices that map to business needs.",[1011],"Emilio Salvador","2025-10-07","Change is never easy, especially when it's not your choice. Atlassian's announcement that [all Data Center products will reach end-of-life by March 28, 2029](https://www.atlassian.com/blog/announcements/atlassian-ascend), means thousands of organizations must now reconsider their DevSecOps deployment and infrastructure. But you don't have to settle for deployment options that don't fit your needs. GitLab maintains your freedom to choose — whether you need self-managed for compliance, cloud for convenience, or hybrid for flexibility — all within a single AI-powered DevSecOps platform that respects your requirements.\n\nWhile other vendors force migrations to cloud-only architectures, GitLab remains committed to supporting the deployment choices that match your business needs. Whether you're managing sensitive government data, operating in air-gapped environments, or simply prefer the control of self-managed deployments, we understand that one size doesn't fit all.\n\n## The cloud isn't the answer for everyone\n\nFor the many companies that invested millions of dollars in Data Center deployments, including those that migrated to Data Center [after its Server products were discontinued](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/), this announcement represents more than a product sunset. 
It signals a fundamental shift away from customer-centric architecture choices, forcing enterprises into difficult positions: accept a deployment model that doesn't fit their needs, or find a vendor that respects their requirements.\n\nMany of the organizations requiring self-managed deployments represent some of the world's most important organizations: healthcare systems protecting patient data, financial institutions managing trillions in assets, government agencies safeguarding national security, and defense contractors operating in air-gapped environments.\n\nThese organizations don't choose self-managed deployments for convenience; they choose them for compliance, security, and sovereignty requirements that cloud-only architectures simply cannot meet. Organizations operating in closed environments with restricted or no internet access aren't exceptions — they represent a significant portion of enterprise customers across various industries.\n\n![GitLab vs. Atlassian comparison table](https://res.cloudinary.com/about-gitlab-com/image/upload/v1759928476/ynl7wwmkh5xyqhszv46m.jpg)\n\n## The real cost of forced cloud migration goes beyond dollars\n\nWhile cloud-only vendors frame mandatory migrations as \"upgrades,\" organizations face substantial challenges beyond simple financial costs:\n\n* **Lost integration capabilities:** Years of custom integrations with legacy systems, carefully crafted workflows, and enterprise-specific automations become obsolete. Organizations with deep integrations to legacy systems often find cloud migration technically infeasible.\n\n* **Regulatory constraints:** For organizations in regulated industries, cloud migration isn't just complex — it's often not permitted. Data residency requirements, air-gapped environments, and strict regulatory frameworks don't bend to vendor preferences. The absence of single-tenant solutions in many cloud-only approaches creates insurmountable compliance barriers.\n\n* **Productivity impacts:** Cloud-only architectures often require juggling multiple products: separate tools for planning, code management, CI/CD, and documentation. Each tool means another context switch, another integration to maintain, another potential point of failure. GitLab research shows [30% of developers spend at least 50% of their job maintaining and/or integrating their DevSecOps toolchain](https://about.gitlab.com/developer-survey/). Fragmented architectures exacerbate this challenge rather than solving it.\n\n## GitLab offers choice, commitment, and consolidation\n\nEnterprise customers deserve a trustworthy technology partner. That's why we've committed to supporting a range of deployment options — whether you need on-premises for compliance, hybrid for flexibility, or cloud for convenience, the choice remains yours. That commitment continues with [GitLab Duo](https://about.gitlab.com/gitlab-duo-agent-platform/), our AI solution that supports developers at every stage of their workflow.\n\nBut we offer more than just deployment flexibility. While other vendors might force you to cobble together their products into a fragmented toolchain, GitLab provides everything in a **comprehensive AI-native DevSecOps platform**. Source code management, CI/CD, security scanning, Agile planning, and documentation are all managed within a single application and a single vendor relationship.\n\nThis isn't theoretical. 
When Airbus and [Iron Mountain](https://about.gitlab.com/customers/iron-mountain/) evaluated their existing fragmented toolchains, they consistently identified challenges: poor user experience, missing functionality such as built-in security scanning and review apps, and management complexity from plugin troubleshooting. **These aren't minor challenges; they're major blockers for modern software delivery.**\n\n## Your migration path: Simpler than you think\n\nWe've helped thousands of organizations migrate from other vendors, and we've built the tools and expertise to make your transition smooth:\n\n* **Automated migration tools:** Our [Bitbucket Server importer](https://docs.gitlab.com/user/import/bitbucket_server/) brings over repositories, pull requests, comments, and even Large File Storage (LFS) objects. For Jira, our [built-in importer](https://docs.gitlab.com/user/project/import/jira/) handles issues, descriptions, and labels, with professional services available for complex migrations.\n\n* **Proven at scale:** A 500 GiB repository with 13,000 pull requests, 10,000 branches, and 7,000 tags is likely to [take just 8 hours to migrate](https://docs.gitlab.com/user/import/bitbucket_server/) from Bitbucket to GitLab using parallel processing.\n\n* **Immediate ROI:** A [Forrester Consulting Total Economic Impact™ study commissioned by GitLab](https://about.gitlab.com/resources/study-forrester-tei-gitlab-ultimate/) found that investing in GitLab Ultimate delivers real bottom-line impact, with a three-year 483% ROI, 5x time saved in security-related activities, and 25% savings in software toolchain costs.\n\n## Start your journey to a unified DevSecOps platform\n\nForward-thinking organizations aren't waiting for vendor-mandated deadlines. They're evaluating alternatives now, while they have time to migrate thoughtfully to platforms that protect their investments and deliver on promises.\n\nOrganizations invest in self-managed deployments because they need control, compliance, and customization. 
When vendors deprecate these capabilities, they remove not just features but the fundamental ability to choose environments matching business requirements.\n\nModern DevSecOps platforms should offer complete functionality that respects deployment needs, consolidates toolchains, and accelerates software delivery, without forcing compromises on security or data sovereignty.\n\n[Talk to our sales team](https://about.gitlab.com/sales/) today about your migration options, or explore our [comprehensive migration resources](https://about.gitlab.com/move-to-gitlab-from-atlassian/) to see how thousands of organizations have already made the switch.\n\nYou also can [try GitLab Ultimate with GitLab Duo Enterprise](https://about.gitlab.com/free-trial/devsecops/) for free for 30 days to see what a unified DevSecOps platform can do for your organization.",[556,548,785,825],{"featured":13,"template":833,"slug":1016},"atlassian-ending-data-center-as-gitlab-maintains-deployment-choice",{"category":748,"slug":750,"posts":1018},[1019,1030,1042],{"content":1020,"config":1028},{"title":1021,"description":1022,"authors":1023,"heroImage":1025,"date":871,"body":1026,"category":750,"tags":1027},"How to build CI/CD observability at scale","This practical guide to GitLab pipeline analytics helps self-managed users gain operational insights using Prometheus and Grafana.",[1024],"Paul Meresanu","https://res.cloudinary.com/about-gitlab-com/image/upload/v1774465167/n5hlvrsrheadeccyr1oz.png","CI/CD optimization starts with visibility. Building a successful DevOps platform at enterprise scale **should include** understanding pipeline performance, job execution patterns, and quantifiable operational insights — especially for organizations running GitLab self-managed instances.\n\nTo help GitLab customers maximize their platform investments, we developed the GitLab CI/CD Observability solution as part of our Platform Excellence program, which transforms raw pipeline metrics into actionable operational insights.\n\nA leading financial services organization partnered with GitLab's customer success architect to gain visibility into their GitLab self-managed deployment. Together, we implemented a containerized observability solution combining the open-source gitlab-ci-pipelines-exporter with enterprise-grade Prometheus and Grafana infrastructure.\n\nIn this article, you'll learn the challenges they faced managing pipelines at scale and how GitLab CI/CD Observability addressed them with a practical, end-to-end implementation.\n\n## The challenge: Measuring CI/CD performance\nBefore implementing any observability solution, define your measurement landscape:\n*   **What metrics matter?** Pipeline duration, job success rates, queue times, runner utilization\n*   **Who needs visibility?** Developers, DevOps engineers, platform teams, leadership\n*   **What decisions will this drive?** Infrastructure investment, bottleneck remediation, capacity planning\n\n## Solution architecture: A full set of dashboards for observability\nOnce deployed, the observability stack provides a set of Grafana dashboards that give real-time and historical visibility into your CI/CD platform. A typical deployment includes:\n*   **Pipeline Overview Dashboard:** A top-level view showing total pipeline runs, success/failure rates over time (as stacked bar or time-series charts), and average pipeline duration trends. 
Panels use color-coded status indicators (green for success, red for failure, amber for cancelled) so platform teams can spot degradation at a glance.\n*   **Job Performance Dashboard:** Drill-down panels showing individual job duration distributions (histogram), the top 10 slowest jobs by average duration, and job failure heatmaps by project and stage. This is where teams identify specific bottleneck jobs worth optimizing.\n*   **Runner & Infrastructure Dashboard:** Combines Node Exporter host metrics (CPU, memory, disk) with pipeline queue-time data to correlate infrastructure saturation with pipeline wait times. Useful for capacity planning decisions such as scaling runner pools or upgrading instance sizes.\n*   **Deployment Frequency Dashboard:** Tracks deployment count and deployment duration over time per environment, aligned with DORA metrics. Helps engineering leadership assess delivery throughput and environment drift (commits behind main).\n\nEach dashboard is provisioned automatically via Grafana's file-based provisioning, so it deploys consistently across environments. The dashboards can be further customized with Grafana variables to filter by project, ref/branch, or time range.\n\n![Solution architecture](https://res.cloudinary.com/about-gitlab-com/image/upload/v1777382608/Blog/Imported/blog-building-ci-cd-observability-stack-for-gitlab-self-managed/image1.png)\n\nThe solution requires two exporters:\n*   **Pipeline Exporter:** Collects CI/CD metrics via GitLab API (pipeline duration, job status, deployments)\n*   **Node Exporter:** Collects host-level metrics (CPU, memory, disk) for infrastructure correlation\n\n**Prerequisites:**\n*   GitLab Self-Managed Version 18.1+\n*   **Container orchestration platform:** A Kubernetes cluster (recommended for enterprise deployments) or a container runtime such as Docker/Podman for smaller scale or proof-of-concept environments. The primary deployment guide below targets Kubernetes; a Docker Compose alternative is provided in the appendix for local testing and evaluation\n*   GitLab Personal Access Token (**read_api** scope)\n\n## Kubernetes deployment (recommended)\nFor enterprise environments, deploy each component as a separate Deployment within a dedicated namespace. This approach integrates with existing cluster infrastructure, secrets management, and network policies.\n\n### 1. Create namespace and secret\n```bash\nkubectl create namespace gitlab-observability\n\n# Create the GitLab token secret (see Secrets Management section below\n# for enterprise-grade approaches using external secret operators)\nkubectl create secret generic gitlab-token \\\n  --from-literal=token=glpat-xxxxxxxxxxxx \\\n  -n gitlab-observability\n```\n\n\n### 2. 
Deploy the Pipeline Exporter\n```yaml\n# exporter-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gitlab-ci-pipelines-exporter\n  namespace: gitlab-observability\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gitlab-ci-pipelines-exporter\n  template:\n    metadata:\n      labels:\n        app: gitlab-ci-pipelines-exporter\n    spec:\n      containers:\n        - name: exporter\n          image: mvisonneau/gitlab-ci-pipelines-exporter:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: GCPE_GITLAB_TOKEN\n              valueFrom:\n                secretKeyRef:\n                  name: gitlab-token\n                  key: token\n            - name: GCPE_CONFIG\n              value: /etc/gcpe/config.yml\n          volumeMounts:\n            - name: config\n              mountPath: /etc/gcpe\n      volumes:\n        - name: config\n          configMap:\n            name: gcpe-config\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: gitlab-ci-pipelines-exporter\n  namespace: gitlab-observability\nspec:\n  selector:\n    app: gitlab-ci-pipelines-exporter\n  ports:\n    - port: 8080\n      targetPort: 8080\n```\n\n### 3. Deploy Node Exporter (DaemonSet)\n```yaml\n# node-exporter-daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: node-exporter\n  namespace: gitlab-observability\nspec:\n  selector:\n    matchLabels:\n      app: node-exporter\n  template:\n    metadata:\n      labels:\n        app: node-exporter\n    spec:\n      containers:\n        - name: node-exporter\n          image: prom/node-exporter:latest\n          ports:\n            - containerPort: 9100\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: node-exporter\n  namespace: gitlab-observability\nspec:\n  selector:\n    app: node-exporter\n  ports:\n    - port: 9100\n      targetPort: 9100\n```\n\n### 4. Deploy Prometheus\n```yaml\n# prometheus-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: prometheus\n  namespace: gitlab-observability\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: prometheus\n  template:\n    metadata:\n      labels:\n        app: prometheus\n    spec:\n      containers:\n        - name: prometheus\n          image: prom/prometheus:latest\n          ports:\n            - containerPort: 9090\n          volumeMounts:\n            - name: config\n              mountPath: /etc/prometheus\n      volumes:\n        - name: config\n          configMap:\n            name: prometheus-config\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: prometheus\n  namespace: gitlab-observability\nspec:\n  selector:\n    app: prometheus\n  ports:\n    - port: 9090\n      targetPort: 9090\n```\n\n### 5. 
Deploy Grafana\nThe Grafana deployment below starts with authentication disabled (`GF_AUTH_ANONYMOUS_ENABLED: true`) for initial setup convenience.\n\n**This setting allows anyone with network access to view all dashboards without logging in.** For production deployments, remove this variable or set it to false and configure a proper authentication provider (LDAP, SAML/SSO, or OAuth) to restrict access to authorized users.\n```yaml\n# grafana-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: grafana\n  namespace: gitlab-observability\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: grafana\n  template:\n    metadata:\n      labels:\n        app: grafana\n    spec:\n      containers:\n        - name: grafana\n          image: grafana/grafana:10.0.0\n          ports:\n            - containerPort: 3000\n          env:\n            # REMOVE or set to 'false' for production.\n            # When 'true', any user with network access can\n            # view dashboards without authentication.\n            - name: GF_AUTH_ANONYMOUS_ENABLED\n              value: 'true'\n          volumeMounts:\n            - name: dashboards-provider\n              mountPath: /etc/grafana/provisioning/dashboards\n            - name: datasources\n              mountPath: /etc/grafana/provisioning/datasources\n            - name: dashboards\n              mountPath: /var/lib/grafana/dashboards\n      volumes:\n        - name: dashboards-provider\n          configMap:\n            name: grafana-dashboards-provider\n        - name: datasources\n          configMap:\n            name: grafana-datasources\n        - name: dashboards\n          configMap:\n            name: grafana-dashboards\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: grafana\n  namespace: gitlab-observability\nspec:\n  selector:\n    app: grafana\n  ports:\n    - port: 3000\n      targetPort: 3000\n```\n\n### 6. Set network policy\nRestrict inter-pod traffic to only the required communication paths:\n```yaml\n# network-policy.yaml\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: observability-policy\n  namespace: gitlab-observability\nspec:\n  podSelector: {}\n  policyTypes:\n    - Ingress\n  ingress:\n    # Prometheus scrapes exporter and node-exporter\n    - from:\n        - podSelector:\n            matchLabels:\n              app: prometheus\n      ports:\n        - port: 8080\n        - port: 9100\n    # Grafana queries Prometheus\n    - from:\n        - podSelector:\n            matchLabels:\n              app: grafana\n      ports:\n        - port: 9090\n```\n\n### 7. 
Validate\n```bash\nkubectl get pods -n gitlab-observability\nkubectl port-forward svc/grafana 3000:3000 -n gitlab-observability\ncurl http://localhost:3000/api/health\n```\n\n## Configuration reference\n### Exporter configuration\n```yaml\n# gitlab-ci-pipelines-exporter.yml (ConfigMap: gcpe-config)\nlog:\n  level: info\ngitlab:\n  url: https://gitlab.your-domain.com\n  maximum_requests_per_second: 10\nproject_defaults:\n  pull:\n    pipeline:\n      jobs:\n        enabled: true\nwildcards:\n  - owner:\n      name: your-group-name\n      kind: group\n    archived: false\n```\n\n### Prometheus configuration\n```yaml\n# prometheus.yml (ConfigMap: prometheus-config)\nglobal:\n  scrape_interval: 15s\nscrape_configs:\n  - job_name: 'gitlab-ci-pipelines-exporter'\n    static_configs:\n      - targets: ['gitlab-ci-pipelines-exporter:8080']\n  - job_name: 'node-exporter'\n    static_configs:\n      - targets: ['node-exporter:9100']\n```\n\n### Grafana data sources\n```yaml\n# datasources.yml (ConfigMap: grafana-datasources)\napiVersion: 1\ndatasources:\n  - name: Prometheus\n    type: prometheus\n    access: proxy\n    url: http://prometheus:9090\n    isDefault: true\n# dashboards.yml (ConfigMap: grafana-dashboards-provider)\napiVersion: 1\nproviders:\n  - name: 'default'\n    folder: 'GitLab CI/CD'\n    type: file\n    options:\n      path: /var/lib/grafana/dashboards\n```\n\n## Key metrics\n### Pipeline Exporter metrics\n| Metric | Description |\n| :---- | :---- |\n| `gitlab_ci_pipeline_duration_seconds` | Pipeline execution time |\n| `gitlab_ci_pipeline_status` | Pipeline success/failure by project |\n| `gitlab_ci_pipeline_job_duration_seconds` | Individual job execution time |\n| `gitlab_ci_pipeline_job_status` | Job success/failure status |\n| `gitlab_ci_pipeline_job_artifact_size_bytes` | Artifact storage consumption |\n| `gitlab_ci_pipeline_coverage` | Code coverage percentage |\n| `gitlab_ci_environment_deployment_count` | Deployment frequency |\n| `gitlab_ci_environment_deployment_duration_seconds` | Deployment execution time |\n| `gitlab_ci_environment_behind_commits_count` | Environment drift from main |\n\n### Node Exporter metrics\n| Metric | Description |\n| :---- | :---- |\n| `node_cpu_seconds_total` | CPU utilization |\n| `node_memory_MemAvailable_bytes` | Available memory |\n| `node_filesystem_avail_bytes` | Disk space available |\n| `node_load1` | 1-minute load average |\n\n## Troubleshooting\n### Air-gapped Grafana plugin installation\nFor offline environments, install plugins manually. Example for Kubernetes:\n```bash\n# Copy plugin zip into the Grafana pod\nkubectl cp grafana-polystat-panel-2.1.16.zip \\\n  gitlab-observability/grafana-\u003Cpod-id>:/tmp/\n# Extract plugin\nkubectl exec -it -n gitlab-observability deploy/grafana -- \\\n  sh -c \"unzip /tmp/grafana-polystat-panel-2.1.16.zip -d /var/lib/grafana/plugins/\"\n# Restart Grafana pod\nkubectl rollout restart deployment/grafana -n gitlab-observability\n# Verify installation\nkubectl exec -it -n gitlab-observability deploy/grafana -- \\\n  ls -al /var/lib/grafana/plugins/\n```\n\n## Enterprise considerations\nFor regulated industries, ensure:\n*   **Token security:** Store GitLab Personal Access Tokens in a dedicated secrets manager rather than hardcoded in ConfigMaps. Enforce token rotation policies and limit scope to **read\\_api** only.\n*   **Network segmentation:** Deploy behind a reverse proxy with TLS termination. 
In Kubernetes, use an Ingress controller with automated certificate provisioning.\n*   **Authentication:** Configure Grafana with your organization's identity provider (SAML, LDAP, or OAuth/OIDC) to enforce role-based access control on dashboards.\n\n## Why GitLab?\nGitLab's API-first design enables custom observability solutions that complement native capabilities like Value Stream Analytics and DORA metrics. The open architecture allows organizations to integrate proven open-source tooling — like the gitlab-ci-pipelines-exporter — directly with their existing enterprise infrastructure, without disrupting established workflows.\n\nAs your observability maturity grows, GitLab's built-in Observability capabilities provide a natural next step — offering deeper, integrated visibility without additional tooling. Learn more about what's available natively in the platform for [GitLab Observability](https://docs.gitlab.com/operations/observability/observability/).\n",[89,785,886],{"featured":690,"template":833,"slug":1029},"how-to-build-ci-cd-observability-at-scale",{"content":1031,"config":1040},{"body":1032,"title":1033,"description":1034,"authors":1035,"heroImage":1037,"date":1038,"category":750,"tags":1039},"Most CI/CD tools can run a build and ship a deployment. Where they diverge is what happens when your delivery needs get real: a monorepo with a dozen services, microservices spread across multiple repositories, deployments to dozens of environments, or a platform team trying to enforce standards without becoming a bottleneck.\n  \nGitLab's pipeline execution model was designed for that complexity. Parent-child pipelines, DAG execution, dynamic pipeline generation, multi-project triggers, merge request pipelines with merged results, and CI/CD Components each solve a distinct class of problems. Because they compose, understanding the full model unlocks something more than a faster pipeline. In this article, you'll learn about the five patterns where that model stands out, each mapped to a real engineering scenario with the configuration to match.\n  \nThe configs below are illustrative. The scripts use echo commands to keep the signal-to-noise ratio low. Swap them out for your actual build, test, and deploy steps and they are ready to use.\n\n\n## 1. Monorepos: Parent-child pipelines + DAG execution\n\n\nThe problem: Your monorepo has a frontend, a backend, and a docs site. Every commit triggers a full rebuild of everything, even when only a README changed.\n\n\nGitLab solves this with two complementary features: [parent-child pipelines](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#parent-child-pipelines) (which let a top-level pipeline spawn isolated sub-pipelines) and [DAG execution via `needs`](https://docs.gitlab.com/ci/yaml/#needs) (which breaks rigid stage-by-stage ordering and lets jobs start the moment their dependencies finish).\n\n\nA parent pipeline detects what changed and triggers only the relevant child pipelines:\n\n```yaml\n# .gitlab-ci.yml\nstages:\n  - trigger\n\ntrigger-services:\n  stage: trigger\n  trigger:\n    include:\n      - local: '.gitlab/ci/api-service.yml'\n      - local: '.gitlab/ci/web-service.yml'\n      - local: '.gitlab/ci/worker-service.yml'\n    strategy: depend\n```\n\n\nEach child pipeline is a fully independent pipeline with its own stages, jobs, and artifacts. 
The parent waits for all of them via [strategy: depend](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#wait-for-downstream-pipeline-to-complete) so you get a single green/red signal at the top level, with full drill-down into each service's pipeline. This organizational separation is the bigger win for large teams: each service owns its pipeline config, changes in one cannot break another, and the complexity stays manageable as the repo grows.\n\n\nOne thing worth knowing: when you pass [multiple files to a single `trigger: include:`](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#combine-multiple-child-pipeline-configuration-files), GitLab merges them into a single child pipeline configuration. This means jobs defined across those files share the same pipeline context and can reference each other with `needs:`, which is what makes the DAG optimization possible. If you split them into separate trigger jobs instead, each would be its own isolated pipeline and cross-file `needs:` references would not work.\n\n\nCombine this with `needs:` inside each child pipeline and you get DAG execution. Your integration tests can start the moment the build finishes, without waiting for other jobs in the same stage.\n\n```yaml\n# .gitlab/ci/api-service.yml\nstages:\n  - build\n  - test\n\nbuild-api:\n  stage: build\n  script:\n    - echo \"Building API service\"\n\ntest-api:\n  stage: test\n  needs: [build-api]\n  script:\n    - echo \"Running API tests\"\n```\n\n\nWhy it matters: Teams with large monorepos typically report significant reductions in pipeline runtime after switching to DAG execution, since jobs no longer wait on unrelated work in the same stage. Parent-child pipelines add the organizational layer that keeps the configuration maintainable as the repo and team grow.\n\n![Local downstream pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738759/Blog/Imported/hackathon-fake-blog-post-s/image3_vwj3rz.png \"Local downstream pipelines\")\n\n## 2. Microservices: Cross-repo, multi-project pipelines\n\n\nThe problem: Your frontend lives in one repo, your backend in another. When the frontend team ships a change, they have no visibility into whether it broke the backend integration and vice versa.\n\n\nGitLab's [multi-project pipelines](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#multi-project-pipelines) let one project trigger a pipeline in a completely separate project and wait for the result. The triggering project gets a linked downstream pipeline right in its own pipeline view.\n\n\nThe frontend pipeline builds an API contract artifact and publishes it, then triggers the backend pipeline. The backend fetches that artifact directly using the [Jobs API](https://docs.gitlab.com/api/jobs/#download-a-single-artifact-file-from-specific-tag-or-branch) and validates it before allowing anything to proceed. 
If a breaking change is detected, the backend pipeline fails and the frontend pipeline fails with it.\n\n```yaml\n# frontend repo: .gitlab-ci.yml\nstages:\n  - build\n  - test\n  - trigger-backend\n\nbuild-frontend:\n  stage: build\n  script:\n    - echo \"Building frontend and generating API contract...\"\n    - mkdir -p dist\n    - |\n      echo '{\n        \"api_version\": \"v2\",\n        \"breaking_changes\": false\n      }' > dist/api-contract.json\n    - cat dist/api-contract.json\n  artifacts:\n    paths:\n      - dist/api-contract.json\n    expire_in: 1 hour\n\ntest-frontend:\n  stage: test\n  script:\n    - echo \"All frontend tests passed!\"\n\ntrigger-backend-pipeline:\n  stage: trigger-backend\n  trigger:\n    project: my-org/backend-service\n    branch: main\n    strategy: depend\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\"\n```\n\n```yaml\n# backend repo: .gitlab-ci.yml\nstages:\n  - build\n  - test\n\nbuild-backend:\n  stage: build\n  script:\n    - echo \"All backend tests passed!\"\n\nintegration-test:\n  stage: test\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"pipeline\"\n  script:\n    - echo \"Fetching API contract from frontend...\"\n    - |\n      curl --silent --fail \\\n        --header \"JOB-TOKEN: $CI_JOB_TOKEN\" \\\n        --output api-contract.json \\\n        \"${CI_API_V4_URL}/projects/${FRONTEND_PROJECT_ID}/jobs/artifacts/main/raw/dist/api-contract.json?job=build-frontend\"\n    - cat api-contract.json\n    - |\n      if grep -q '\"breaking_changes\": true' api-contract.json; then\n        echo \"FAIL: Breaking API changes detected - backend integration blocked!\"\n        exit 1\n      fi\n      echo \"PASS: API contract is compatible!\"\n```\n\n\nA few things worth noting in this config. The `integration-test` job uses `$CI_PIPELINE_SOURCE == \"pipeline\"` to ensure it only runs when triggered by an upstream pipeline, not on a standalone push to the backend repo. The frontend project ID is referenced via `$FRONTEND_PROJECT_ID`, which should be set as a [CI/CD variable](https://docs.gitlab.com/ci/variables/) in the backend project settings to avoid hardcoding it.\n\n\nWhy it matters: Cross-service breakage that previously surfaced in production gets caught in the pipeline instead. The dependency between services stops being invisible and becomes something teams can see, track, and act on.\n\n\n![Cross-project pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738762/Blog/Imported/hackathon-fake-blog-post-s/image4_h6mfsb.png \"Cross-project pipelines\")\n\n\n## 3. Multi-tenant / matrix deployments: Dynamic child pipelines\n\n\nThe problem: You deploy the same application to 15 customer environments, or three cloud regions, or dev/staging/prod. Updating a deploy stage across all of them one by one is the kind of work that leads to configuration drift. Writing a separate pipeline for each environment is unmaintainable from day one.\n\n\nGitLab's [dynamic child pipelines](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#dynamic-child-pipelines) let you generate a pipeline at runtime. A job runs a script that produces a YAML file, and that YAML becomes the pipeline for the next stage. 
The pipeline structure itself becomes data.\n\n\n```yaml\n# .gitlab-ci.yml\nstages:\n  - generate\n  - trigger-environments\n\ngenerate-config:\n  stage: generate\n  script:\n    - |\n      # ENVIRONMENTS can be passed as a CI variable or read from a config file.\n      # Default to dev, staging, prod if not set.\n      ENVIRONMENTS=${ENVIRONMENTS:-\"dev staging prod\"}\n      for ENV in $ENVIRONMENTS; do\n        cat > ${ENV}-pipeline.yml \u003C\u003C EOF\n      stages:\n        - deploy\n        - verify\n      deploy-${ENV}:\n        stage: deploy\n        script:\n          - echo \"Deploying to ${ENV} environment\"\n      verify-${ENV}:\n        stage: verify\n        script:\n          - echo \"Running smoke tests on ${ENV}\"\n      EOF\n      done\n  artifacts:\n    paths:\n      - \"*.yml\"\n    exclude:\n      - \".gitlab-ci.yml\"\n\n.trigger-template:\n  stage: trigger-environments\n  trigger:\n    strategy: depend\n\ntrigger-dev:\n  extends: .trigger-template\n  trigger:\n    include:\n      - artifact: dev-pipeline.yml\n        job: generate-config\n\ntrigger-staging:\n  extends: .trigger-template\n  needs: [trigger-dev]\n  trigger:\n    include:\n      - artifact: staging-pipeline.yml\n        job: generate-config\n\ntrigger-prod:\n  extends: .trigger-template\n  needs: [trigger-staging]\n  trigger:\n    include:\n      - artifact: prod-pipeline.yml\n        job: generate-config\n  when: manual\n```\n\n\nThe generation script loops over an `ENVIRONMENTS` variable rather than hardcoding each environment separately. Pass in a different list via a CI variable or read it from a config file and the pipeline adapts without touching the YAML. The trigger jobs use [extends:](https://docs.gitlab.com/ci/yaml/#extends) to inherit shared configuration from `.trigger-template`, so `strategy: depend` is defined once rather than repeated on every trigger job. Add a new environment by updating the variable, not by duplicating pipeline config. Add [when: manual](https://docs.gitlab.com/ci/yaml/#when) to the production trigger and you get a promotion gate baked right into the pipeline graph.\n\n\nWhy it matters: SaaS companies and platform teams use this pattern to manage dozens of environments without duplicating pipeline logic. The pipeline structure itself stays lean as the deployment matrix grows.\n\n\n![Dynamic pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738765/Blog/Imported/hackathon-fake-blog-post-s/image7_wr0kx2.png \"Dynamic pipeline\")\n\n\n## 4. MR-first delivery: Merge request pipelines, merged results, and workflow routing\n\n\nThe problem: Your pipeline runs on every push to every branch. Expensive tests run on feature branches that will never merge. Meanwhile, you have no guarantee that what you tested is actually what will land on `main` after a merge.\n\n\nGitLab has three interlocking features that solve this together:\n\n\n*   [Merge request pipelines](https://docs.gitlab.com/ci/pipelines/merge_request_pipelines/) run only when a merge request exists, not on every branch push. This alone eliminates a significant amount of wasted compute.\n\n*   [Merged results pipelines](https://docs.gitlab.com/ci/pipelines/merged_results_pipelines/) go further. GitLab creates a temporary merge commit (your branch plus the current target branch) and runs the pipeline against that. 
You are testing what will actually exist after the merge, not just your branch in isolation.\n\n*   [Workflow rules](https://docs.gitlab.com/ci/yaml/workflow/) let you define exactly which pipeline type runs under which conditions and suppress everything else. The `$CI_OPEN_MERGE_REQUESTS` guard below prevents duplicate pipelines firing for both a branch and its open MR simultaneously.\n\n\nWith those three working together, here is what a tiered pipeline looks like:\n\n```yaml\n# .gitlab-ci.yml\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS\n      when: never\n    - if: $CI_COMMIT_BRANCH\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n\nstages:\n  - fast-checks\n  - expensive-tests\n  - deploy\n\nlint-code:\n  stage: fast-checks\n  script:\n    - echo \"Running linter\"\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH == \"main\"\n\nunit-tests:\n  stage: fast-checks\n  script:\n    - echo \"Running unit tests\"\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH == \"main\"\n\nintegration-tests:\n  stage: expensive-tests\n  script:\n    - echo \"Running integration tests (15 min)\"\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH == \"main\"\n\ne2e-tests:\n  stage: expensive-tests\n  script:\n    - echo \"Running E2E tests (30 min)\"\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH == \"main\"\n\nnightly-comprehensive-scan:\n  stage: expensive-tests\n  script:\n    - echo \"Running full nightly suite (2 hours)\"\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n\ndeploy-production:\n  stage: deploy\n  script:\n    - echo \"Deploying to production\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\"\n      when: manual\n```\n\nWith this setup, the pipeline behaves differently depending on context. A push to a feature branch with no open MR runs lint and unit tests only. Once an MR is opened, the workflow rules switch from a branch pipeline to an MR pipeline, and the full integration and E2E suite runs against the merged result. Merging to `main` queues a manual production deployment. A nightly schedule runs the comprehensive scan once, not on every commit.\n\n\nWhy it matters: Teams routinely cut CI costs significantly with this pattern, not by running fewer tests, but by running the right tests at the right time. Merged results pipelines catch the class of bugs that only appear after a merge, before they ever reach `main`.\n\n\n![Conditional pipelines (within a branch with no MR)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738768/Blog/Imported/hackathon-fake-blog-post-s/image6_dnfcny.png \"Conditional pipelines (within a branch with no MR)\")\n\n\n\n![Conditional pipelines (within an MR)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738772/Blog/Imported/hackathon-fake-blog-post-s/image1_wyiafu.png \"Conditional pipelines (within an MR)\")\n\n\n\n![Conditional pipelines (on the main branch)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738774/Blog/Imported/hackathon-fake-blog-post-s/image5_r6lkfd.png \"Conditional pipelines (on the main branch)\")\n\n## 5. 
Governed pipelines: CI/CD Components\n\n\nThe problem: Your platform team has defined the right way to build, test, and deploy. But every team has their own `.gitlab-ci.yml` with subtle variations. Security scanning gets skipped. Deployment standards drift. Audits are painful.\n\n\nGitLab [CI/CD Components](https://docs.gitlab.com/ci/components/) let platform teams publish versioned, reusable pipeline building blocks. Application teams consume them with a single `include:` line and optional inputs — no copy-paste, no drift. Components are discoverable through the [CI/CD Catalog](https://docs.gitlab.com/ci/components/#cicd-catalog), which means teams can find and adopt approved building blocks without needing to go through the platform team directly.\n\n\nHere is a component definition from a shared library:\n\n```yaml\n# templates/deploy.yml\nspec:\n  inputs:\n    stage:\n      default: deploy\n    environment:\n      default: production\n---\ndeploy-job:\n  stage: $[[ inputs.stage ]]\n  script:\n    - echo \"Deploying $APP_NAME to $[[ inputs.environment ]]\"\n    - echo \"Deploy URL: $DEPLOY_URL\"\n  environment:\n    name: $[[ inputs.environment ]]\n```\nAnd here is how an application team consumes it:\n\n```yaml\n# Application repo: .gitlab-ci.yml\nvariables:\n  APP_NAME: \"my-awesome-app\"\n  DEPLOY_URL: \"https://api.example.com\"\n\ninclude:\n  - component: gitlab.com/my-org/component-library/build@v1.0.6\n  - component: gitlab.com/my-org/component-library/test@v1.0.6\n  - component: gitlab.com/my-org/component-library/deploy@v1.0.6\n    inputs:\n      environment: staging\n\nstages:\n  - build\n  - test\n  - deploy\n```\n\nThree lines of `include:` replace hundreds of lines of duplicated YAML. The platform team can push a security fix to `v1.0.7` and teams opt in on their own schedule — or the platform team can pin everyone to a minimum version. Either way, one change propagates everywhere instead of needing to be applied repo by repo.\n\n\nPair this with [resource groups](https://docs.gitlab.com/ci/resource_groups/) to prevent concurrent deployments to the same environment, and [protected environments](https://docs.gitlab.com/ci/environments/protected_environments/) to enforce approval gates - and you have a governed delivery platform where compliance is the default, not the exception.\n\n\nWhy it matters: This is the pattern that makes GitLab CI/CD scale across hundreds of teams. Platform engineering teams enforce compliance without becoming a bottleneck. Application teams get a fast path to a working pipeline without reinventing the wheel.\n\n\n![Component pipeline (imported jobs)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775738776/Blog/Imported/hackathon-fake-blog-post-s/image2_pizuxd.png \"Component pipeline (imported jobs)\")\n\n## Putting it all together\n\nNone of these features exist in isolation. The reason GitLab's pipeline model is worth understanding deeply is that these primitives compose:\n\n*   A monorepo uses parent-child pipelines, and each child uses DAG execution\n\n*   A microservices platform uses multi-project pipelines, and each project uses MR pipelines with merged results\n\n*   A governed platform uses CI/CD components to standardize the patterns above across every team\n\n\nMost teams discover one of these features when they hit a specific pain point. 
The ones who invest in understanding the full model end up with a delivery system that actually reflects how their engineering organization works, not a pipeline that fights it.\n\n## Other patterns worth exploring\n\n\nThe five patterns above cover the most common structural pain points, but GitLab's pipeline model goes further. A few others worth looking into as your needs grow:\n\n\n*   [Review apps with dynamic environments](https://docs.gitlab.com/ci/environments/) let you spin up a live preview for every feature branch and tear it down automatically when the MR closes. Useful for teams doing frontend work or API changes that need stakeholder sign-off before merging.\n\n*   [Caching and artifact strategies](https://docs.gitlab.com/ci/caching/) are often the fastest way to cut pipeline runtime after the structural work is done. Structuring `cache:` keys around dependency lockfiles and being deliberate about what gets passed between jobs with [artifacts:](https://docs.gitlab.com/ci/yaml/#artifacts) can make a significant difference without changing your pipeline shape at all.\n\n*   [Scheduled and API-triggered pipelines](https://docs.gitlab.com/ci/pipelines/schedules/) are worth knowing about because not everything should run on a code push. Nightly security scans, compliance reports, and release automation are better modeled as scheduled or [API-triggered](https://docs.gitlab.com/ci/triggers/) pipelines with `$CI_PIPELINE_SOURCE` routing the right jobs for each context.\n\n## How to get started\n\nModern software delivery is complex. Teams are managing monorepos with dozens of services, coordinating across multiple repositories, deploying to many environments at once, and trying to keep standards consistent as organizations grow. GitLab's pipeline model was built with all of that in mind.\n\nWhat makes it worth investing time in is how well the pieces fit together. Parent-child pipelines bring structure to large codebases. Multi-project pipelines make cross-team dependencies visible and testable. Dynamic pipelines turn environment management into something that scales gracefully. MR-first delivery with merged results ensures confidence at every step of the review process. And CI/CD Components give platform teams a way to share best practices across an entire organization without becoming a bottleneck.\n\nEach of these features is powerful on its own, and even more so when combined. 
GitLab gives you the building blocks to design a delivery system that fits how your team actually works, and grows with you as your needs evolve.\n\n> [Start a free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/) to use pipeline logic today.\n\n## Read more\n\n*   [Variable and artifact sharing in GitLab parent-child pipelines](https://about.gitlab.com/blog/variable-and-artifact-sharing-in-gitlab-parent-child-pipelines/)\n*   [CI/CD inputs: Secure and preferred method to pass parameters to a pipeline](https://about.gitlab.com/blog/ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline/)\n*   [Tutorial: How to set up your first GitLab CI/CD component](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n*   [How to include file references in your CI/CD components](https://about.gitlab.com/blog/how-to-include-file-references-in-your-ci-cd-components/)\n*   [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n*   [Building a GitLab CI/CD pipeline for a monorepo the easy way](https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way/)\n*   [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n*   [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)","5 ways GitLab pipeline logic solves real engineering problems","Learn how to scale CI/CD with composable patterns for monorepos, microservices, environments, and governance.",[1036],"Omid Khan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1772721753/frfsm1qfscwrmsyzj1qn.png","2026-04-09",[89,1003,886,825],{"featured":13,"template":833,"slug":1041},"5-ways-gitlab-pipeline-logic-solves-real-engineering-problems",{"content":1043,"config":1052},{"title":1044,"description":1045,"authors":1046,"heroImage":1048,"date":1049,"body":1050,"category":750,"tags":1051},"How to use GitLab Container Virtual Registry with Docker Hardened Images","Learn how to simplify container image management with this step-by-step guide.",[1047],"Tim Rizzi","https://res.cloudinary.com/about-gitlab-com/image/upload/v1772111172/mwhgbjawn62kymfwrhle.png","2026-03-12","If you're a platform engineer, you've probably had this conversation:\n  \n*\"Security says we need to use hardened base images.\"*\n\n*\"Great, where do I configure credentials for yet another registry?\"*\n\n*\"Also, how do we make sure everyone actually uses them?\"*\n\nOr this one:\n\n*\"Why are our builds so slow?\"*\n\n*\"We're pulling the same 500MB image from Docker Hub in every single job.\"*\n\n*\"Can't we just cache these somewhere?\"*\n\nI've been working on [Container Virtual Registry](https://docs.gitlab.com/user/packages/virtual_registry/container/) at GitLab specifically to solve these problems. It's a pull-through cache that sits in front of your upstream registries — Docker Hub, dhi.io (Docker Hardened Images), MCR, and Quay — and gives your teams a single endpoint to pull from. Images get cached on the first pull. Subsequent pulls come from the cache. 
Your developers don't need to know or care which upstream a particular image came from.\n\nThis article shows you how to set up Container Virtual Registry, specifically with Docker Hardened Images in mind, since that's a combination that makes a lot of sense for teams concerned about security and not making their developers' lives harder.\n\n## What problem are we actually solving?\n\nThe Platform teams I usually talk to manage container images across three to five registries:\n\n* **Docker Hub** for most base images\n* **dhi.io** for Docker Hardened Images (security-conscious workloads)\n* **MCR** for .NET and Azure tooling\n* **Quay.io** for Red Hat ecosystem stuff\n* **Internal registries** for proprietary images\n\nEach one has its own:\n\n* Authentication mechanism\n* Network latency characteristics\n* Way of organizing image paths\n\nYour CI/CD configs end up littered with registry-specific logic. Credential management becomes a project unto itself. And every pipeline job pulls the same base images over the network, even though they haven't changed in weeks.\n\nContainer Virtual Registry consolidates this. One registry URL. One authentication flow (GitLab's). Cached images are served from GitLab's infrastructure rather than traversing the internet each time.\n\n## How it works\n\nThe model is straightforward:\n\n```text\nYour pipeline pulls:\n  gitlab.com/virtual_registries/container/1000016/python:3.13\n\nVirtual registry checks:\n  1. Do I have this cached? → Return it\n  2. No? → Fetch from upstream, cache it, return it\n\n```\n\nYou configure upstreams in priority order. When a pull request comes in, the virtual registry checks each upstream until it finds the image. The result gets cached for a configurable period (default 24 hours).\n\n```text\n┌─────────────────────────────────────────────────────────┐\n│                    CI/CD Pipeline                       │\n│                          │                              │\n│                          ▼                              │\n│   gitlab.com/virtual_registries/container/\u003Cid>/image   │\n└─────────────────────────────────────────────────────────┘\n                           │\n                           ▼\n┌─────────────────────────────────────────────────────────┐\n│            Container Virtual Registry                   │\n│                                                         │\n│  Upstream 1: Docker Hub ────────────────┐               │\n│  Upstream 2: dhi.io (Hardened) ────────┐│               │\n│  Upstream 3: MCR ─────────────────────┐││               │\n│  Upstream 4: Quay.io ────────────────┐│││               │\n│                                      ││││               │\n│                    ┌─────────────────┴┴┴┴──┐            │\n│                    │        Cache          │            │\n│                    │  (manifests + layers) │            │\n│                    └───────────────────────┘            │\n└─────────────────────────────────────────────────────────┘\n```\n\n## Why this matters for Docker Hardened Images\n\n[Docker Hardened Images](https://docs.docker.com/dhi/) are great because of the minimal attack surface, near-zero CVEs, proper software bills of materials (SBOMs), and SLSA provenance. 
If you're evaluating base images for security-sensitive workloads, they should be on your list.\n\nBut adopting them creates the same operational friction as any new registry:\n\n* **Credential distribution**: You need to get Docker credentials to every system that pulls images from dhi.io.\n* **CI/CD changes**: Every pipeline needs to be updated to authenticate with dhi.io.\n* **Developer friction**: People need to remember to use the hardened variants.\n* **Visibility gap**: It's difficult to tell if teams are actually using hardened images vs. regular ones.\n\nVirtual registry addresses each of these:\n\n**Single credential**: Teams authenticate to GitLab. The virtual registry handles upstream authentication. You configure Docker credentials once, at the registry level, and they apply to all pulls.\n\n**No CI/CD changes per-team**: Point pipelines at your virtual registry. Done. The upstream configuration is centralized.\n\n**Gradual adoption**: Since images get cached with their full path, you can see in the cache what's being pulled. If someone's pulling `library/python:3.11` instead of the hardened variant, you'll know.\n\n**Audit trail**: The cache shows you exactly which images are in active use. Useful for compliance, useful for understanding what your fleet actually depends on.\n\n## Setting it up\n\nHere's a real setup using the Python client from this demo project.\n\n### Create the virtual registry\n\n```python\nfrom virtual_registry_client import VirtualRegistryClient\n\nclient = VirtualRegistryClient()\n\nregistry = client.create_virtual_registry(\n    group_id=\"785414\",  # Your top-level group ID\n    name=\"platform-images\",\n    description=\"Cached container images for platform teams\"\n)\n\nprint(f\"Registry ID: {registry['id']}\")\n# You'll need this ID for the pull URL\n```\n\n### Add Docker Hub as an upstream\n\nFor official images like Alpine, Python, etc.:\n\n```python\ndocker_upstream = client.create_upstream(\n    registry_id=registry['id'],\n    url=\"https://registry-1.docker.io\",\n    name=\"Docker Hub\",\n    cache_validity_hours=24\n)\n```\n\n### Add Docker Hardened Images (dhi.io)\n\nDocker Hardened Images are hosted on `dhi.io`, a separate registry that requires authentication:\n\n```python\ndhi_upstream = client.create_upstream(\n    registry_id=registry['id'],\n    url=\"https://dhi.io\",\n    name=\"Docker Hardened Images\",\n    username=\"your-docker-username\",\n    password=\"your-docker-access-token\",\n    cache_validity_hours=24\n)\n```\n\n### Add other upstreams\n\n```python\n# MCR for .NET teams\nclient.create_upstream(\n    registry_id=registry['id'],\n    url=\"https://mcr.microsoft.com\",\n    name=\"Microsoft Container Registry\",\n    cache_validity_hours=48\n)\n\n# Quay for Red Hat stuff\nclient.create_upstream(\n    registry_id=registry['id'],\n    url=\"https://quay.io\",\n    name=\"Quay.io\",\n    cache_validity_hours=24\n)\n```\n\n### Update your CI/CD\n\nHere's a `.gitlab-ci.yml` that pulls through the virtual registry:\n\n```yaml\nvariables:\n  VIRTUAL_REGISTRY_ID: \u003Cyour_virtual_registry_ID>\n\n  \nbuild:\n  image: docker:24\n  services:\n    - docker:24-dind\n  before_script:\n    # Authenticate to GitLab (which handles upstream auth for you)\n    - echo \"${CI_JOB_TOKEN}\" | docker login -u gitlab-ci-token --password-stdin gitlab.com\n  script:\n    # All of these go through your single virtual registry\n    \n    # Official Docker Hub images (use library/ prefix)\n    - docker pull 
gitlab.com/virtual_registries/container/${VIRTUAL_REGISTRY_ID}/library/alpine:latest\n    \n    # Docker Hardened Images from dhi.io (no prefix needed)\n    - docker pull gitlab.com/virtual_registries/container/${VIRTUAL_REGISTRY_ID}/python:3.13\n    \n    # .NET from MCR\n    - docker pull gitlab.com/virtual_registries/container/${VIRTUAL_REGISTRY_ID}/dotnet/sdk:8.0\n```\n\n### Image path formats\n\nDifferent registries use different path conventions:\n\n| Registry | Pull URL Example |\n|----------|------------------|\n| Docker Hub (official) | `.../library/python:3.11-slim` |\n| Docker Hardened Images (dhi.io) | `.../python:3.13` |\n| MCR | `.../dotnet/sdk:8.0` |\n| Quay.io | `.../prometheus/prometheus:latest` |\n\n### Verify it's working\n\nAfter some pulls, check your cache:\n\n```python\nupstreams = client.list_registry_upstreams(registry['id'])\nfor upstream in upstreams:\n    entries = client.list_cache_entries(upstream['id'])\n    print(f\"{upstream['name']}: {len(entries)} cached entries\")\n\n```\n\n## What the numbers look like\n\nI ran tests pulling images through the virtual registry:\n\n| Metric | Without Cache | With Warm Cache |\n|--------|---------------|-----------------|\n| Pull time (Alpine) | 10.3s | 4.2s |\n| Pull time (Python 3.13 DHI) | 11.6s | ~4s |\n| Network roundtrips to upstream | Every pull | Cache misses only |\n\n\n\n\nThe first pull is the same speed (it has to fetch from upstream). Every pull after that, for the cache validity period, comes straight from GitLab's storage. No network hop to Docker Hub, dhi.io, MCR, or wherever the image lives.\n\nFor a team running hundreds of pipeline jobs per day, that's hours of cumulative build time saved.\n\n## Practical considerations\nHere are some considerations to keep in mind:\n\n### Cache validity\n\n24 hours is the default. For security-sensitive images where you want patches quickly, consider 12 hours or less:\n\n```python\nclient.create_upstream(\n    registry_id=registry['id'],\n    url=\"https://dhi.io\",\n    name=\"Docker Hardened Images\",\n    username=\"your-username\",\n    password=\"your-token\",\n    cache_validity_hours=12\n)\n```\n\nFor stable, infrequently-updated images (like specific version tags), longer validity is fine.\n\n### Upstream priority\n\nUpstreams are checked in order. If you have images with the same name on different registries, the first matching upstream wins.\n\n### Limits\n\n* Maximum of 20 virtual registries per group\n* Maximum of 20 upstreams per virtual registry\n\n## Configuration via UI\n\nYou can also configure virtual registries and upstreams directly from the GitLab UI—no API calls required. Navigate to your group's **Settings > Packages and registries > Virtual Registry** to:\n\n* Create and manage virtual registries\n* Add, edit, and reorder upstream registries\n* View and manage the cache\n* Monitor which images are being pulled\n\n## What's next\n\nWe're actively developing:\n\n* **Allow/deny lists**: Use regex to control which images can be pulled from specific upstreams.\n\nThis is beta software. 
It works, people are using it in production, but we're still iterating based on feedback.\n\n## Share your feedback\n\nIf you're a platform engineer dealing with container registry sprawl, I'd like to understand your setup:\n\n* How many upstream registries are you managing?\n* What's your biggest pain point with the current state?\n* Would something like this help, and if not, what's missing?\n\nPlease share your experiences in the [Container Virtual Registry feedback issue](https://gitlab.com/gitlab-org/gitlab/-/work_items/589630).\n## Related resources\n- [New GitLab metrics and registry features help reduce CI/CD bottlenecks](https://about.gitlab.com/blog/new-gitlab-metrics-and-registry-features-help-reduce-ci-cd-bottlenecks/#container-virtual-registry)\n- [Container Virtual Registry documentation](https://docs.gitlab.com/user/packages/virtual_registry/container/)\n- [Container Virtual Registry API](https://docs.gitlab.com/api/container_virtual_registries/)",[886,785,825],{"featured":690,"template":833,"slug":1053},"using-gitlab-container-virtual-registry-with-docker-hardened-images",{"category":760,"slug":762,"posts":1055},[1056,1067,1079],{"content":1057,"config":1065},{"title":1058,"description":1059,"authors":1060,"heroImage":1025,"body":1061,"date":1062,"category":762,"tags":1063},"GitLab named a 2026 Omdia Universe Leader","Omdia's 2026 report on AI-assisted software development ranked 19 vendors. Here is what GitLab's top scores mean for engineering teams.",[919],"GitLab is named a Leader in the 2026 Omdia Universe for AI-assisted Software Development, IDE-based Tools. Of the nineteen vendors evaluated by the independent analyst firm, GitLab earned best-in-class scores in three categories: Solution Breadth (100%), Strategy and Innovation (88%), and Core Features (82%). Top-tier ratings followed for Extended Features and Vendor Execution.\n\n\nThis year's assessment is notable for a specific reason: Omdia expanded its evaluation criteria, and for the first time, AI development tools were scored on full software lifecycle capability, not just coding. That shift mirrors where the AI evolution is heading and shook up which vendors came out on top.\n\n\n![Omdia Universe chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1775848262/asyd6bpbtwlhicqonhit.png \"Source: Omdia, Universe: AI-assisted Software Development, Part 1: IDE-based Tools, 2026\")\n> [Download the full Omdia Universe report.](https://learn.gitlab.com/c/analyst-omdia-ai?x=fRC1cQ)\n\n## About Omdia Universe\n\nOmdia Universe plots vendors across Solution Capability and Strategy and Execution, producing three tiers: Leaders (strongest on both axes, recommended for every shortlist), Challengers (narrower feature range or earlier in maturity), and Prospects (earlier-stage or adjacent-fit vendors).\n\n## What changed in this year's assessment\n\nThe expansion of Omdia's criteria reflects something practitioners are already experiencing. AI coding tools have raised developer output significantly, and applications that once took weeks can now be prototyped in a fraction of the time. But acceleration at the coding stage does not automatically translate to faster delivery. Review backlogs grow. Security findings accumulate. Deployments still require coordination across teams using tools that were not designed to work together.\n\nOmdia captured this dynamic directly: The tools pulling ahead are the ones that handle testing, security, deployment, and orchestration. Not just code generation. 
That finding drove the decision to broaden the assessment criteria and separated the Leaders from the Challengers.\n\nThe other major shift in this year's report is how Omdia treated agentic AI. The 2026 assessment weighted agentic capabilities as a current evaluation dimension rather than a future consideration. This includes whether a platform can coordinate multiple tasks autonomously, orchestrate handoffs between specialized agents, and support teams at different stages of agent adoption.\n\n## Where GitLab scored\n\nGitLab earned best-in-class scores in three categories:\n\n**Solution Breadth: 100%.** Coverage of the full SDLC in a single platform, from planning and requirements through deployment and issue management. This includes lifecycle phases that most AI coding tools do not touch. For example, prebuilt agents like [Planner Agent](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/planner/) and [Security Analyst Agent](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/security_analyst_agent/) extend AI assistance into sprint planning, vulnerability triage, and remediation guidance: the parts of the lifecycle where delivery actually gets stuck.\n\n**Strategy and Innovation: 88%.** Differentiation through end-to-end orchestration, privacy-first architecture with no training on private data, and multi-model support via partnerships with Anthropic, Google, and AWS. Teams can select models suited to their workload and data requirements. The platform's approach to unified context, where agents collaborate across issues, merge requests, pipelines, and security findings without losing state, is an example of the architectural innovation Omdia recognized in this category.\n\n**Core Features: 82%.** This score reflects deep coverage across the parts of the lifecycle where engineering teams spend most of their time. Code is generated with real-time context from the IDE and codebase, tested across unit, integration, and security dimensions, and reviewed with prioritization built in. DevOps automation handles CI/CD, GitOps, and [root cause analysis](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) for pipeline failures. The [AI Impact Dashboard](https://docs.gitlab.com/user/analytics/duo_and_sdlc_trends/) gives teams measurable visibility into cycle times, deployment frequency, and where AI is actually moving the needle on productivity.\n\nGitLab also earned top-tier recognition for Extended Features (80%) and Vendor Execution (88%).\n\n## The changing role of human developers and AI agents\n\nOne of the more substantive findings in the Omdia report concerns the evolving role of the software developer alongside these tools. Development teams are increasingly a mix of AI engineers and their AI agents, with engineers supervising and directing agentic AI. With AI coding generating the bulk of the code, the human's job shifts toward ensuring technology requirements are actually met, supervising quality, applying right guardrails, designing autonomous production pipelines, and mediating between business goals and the use of agentic AI across the software lifecycle.\n\nThis shift has implications for how organizations evaluate their AI investments. A team that has automated code generation but still handles review, testing, and deployment manually has not yet truly accelerated software innovation. 
The productivity gain from faster coding compounds when the rest of the lifecycle can keep pace. It shrinks when it cannot, and the bottlenecks move downstream instead.\n\n## Enterprise readiness as table stakes\n\nSomething notable in how Omdia structured this year's assessment: enterprise controls and guardrails are no longer a bonus category. Compliance certifications, deployment flexibility, and privacy architecture appeared as baseline expectations for Leader-tier platforms, not as distinguishing features. Organizations in regulated industries and those with data sovereignty requirements are now weighing these factors as entry criteria.\n\nGitLab's posture on these dimensions highlights its unique differentiation in the market: SOC 2 and ISO 27001 certified platform, [privacy-first design](https://about.gitlab.com/blog/why-enterprise-independence-matters-more-than-ever-in-devsecops/) with no training on private customer data for its agentic AI capabilities, self-managed deployment support across cloud and on-premises (including air-gapped environments), and support for self-hosted AI models. Its consumption as a single-tenant SaaS application via GitLab Dedicated, with FedRAMP Moderate Authorized via GitLab Dedicated for Government, extends its leadership in deployment flexibility. \n\nThe Omdia report recognized these not as a feature list but as evidence of the platform's readiness for the organizations where the compliance bar is highest: financial services, government, healthcare, and other regulated sectors that cannot compromise on data residency or auditability.\n\n## Benchmark your maturity in software development\n\nFor teams actively evaluating where their AI development strategy stands, Omdia's recommendation is clear: GitLab belongs at the top of the list.\n\nThe deeper question for most engineering leaders right now is not which AI tool generates the best code. It is whether the code being generated can be put into production with the highest level of quality, security, and performance. It must be understood, governed, and maintained by the software teams responsible for it. With GitLab, coding speed translates to innovation velocity.\n\nIf you want to benchmark your organization’s maturity in software development best practices and evolution, you can get a personalized score and concrete next steps to take in these assessments for [AI Modernization](https://about.gitlab.com/assessments/ai-modernization-assessment/), [DevOps Modernization](https://about.gitlab.com/assessments/devops-modernization-assessment/), and [Security Modernization](https://about.gitlab.com/assessments/security-modernization-assessment/). \n\n[Download the full Omdia Universe report.](https://learn.gitlab.com/c/analyst-omdia-ai?x=fRC1cQ)\n","2026-04-13",[1064,548,516,762],"research",{"featured":13,"template":833,"slug":1066},"gitlab-named-a-2026-omdia-universe-leader",{"content":1068,"config":1077},{"title":1069,"description":1070,"authors":1071,"heroImage":1073,"date":1074,"body":1075,"category":762,"tags":1076},"Introducing the GitLab Managed Service Provider (MSP) Partner Program","Build a profitable, services-led DevSecOps practice - backed by GitLab.",[1072],"Karishma Kumar","https://res.cloudinary.com/about-gitlab-com/image/upload/v1772047747/ntihfmnu2fepamqemaas.png","2026-02-26","*This blog is written for managed service providers (MSPs) looking to build a GitLab practice. 
If you’re a developer or engineering leader, this is the program that can empower the partners who help teams like yours scale and move faster.*\n\nMany organizations know they need a modern DevSecOps platform. What they often don't have is the bandwidth to deploy, manage, and continuously optimize one while shipping software at the pace the business demands. That's a real opportunity for MSPs, and now GitLab has a defined program to support them.\n\nWe're excited to introduce the **GitLab MSP Partner Program**, a new global program that enables qualified MSPs to deliver GitLab as a fully managed service to their customers.\n\n## Why this matters for partners and customers\n\nFor the first time, GitLab has a formally defined, globally available program built specifically for MSPs. This means clear requirements, structured enablement, dedicated support, and real financial benefits, so partners can confidently invest in building a GitLab managed services practice.\n\nThe timing is right. Organizations are accelerating their DevSecOps journeys, but many are navigating complex migrations, sprawling toolchains, and growing security requirements on top of their core work of building and shipping software.\n\nGitLab MSP partners handle the operational side of running the platform, including deployment, migration, administration, and ongoing support, so development teams can stay focused on what they do best.\n\n## What MSP partners get\n\n**Financial benefits**: MSP partners earn GitLab partner margins plus an additional MSP premium on all transactions, new business, and renewals. You also retain 100% of the service fees you charge customers for deployment, migration, training, enablement, and strategic consulting. That's multiple recurring revenue streams built around a single platform.\n\n**Enablement and education**: Partners have access to quarterly technical bootcamps covering version updates, new features, best practices, ongoing roadmap updates, and peer sharing. Recommended cloud certifications (AWS Solutions Architect Associate, GCP Associate Cloud Engineer) round out the technical foundation.\n\n**Go-to-market support**: MSPs receive a GitLab Certified MSP Partner badge, co-brandable assets, eligibility for joint customer case studies, a Partner Locator listing, and access to Marketing Development Funds (MDF) for qualified demand generation activities.\n\n## What customers can expect\n\nCustomers working with a GitLab MSP partner get a structured, managed DevSecOps experience, documented and repeatable implementation methodologies, regular business reviews, and support with clearly defined response and escalation paths.\n\nThe result: Development teams can stay focused on building great software while their MSP partner focuses on running and optimizing the platform.\n\n## A new opportunity around AI\n\nOrganizations are increasingly looking to safely introduce AI into their software development workflows, and even experienced teams can benefit from a structured approach to rolling it out at scale. 
GitLab MSP partners are well-positioned to guide customers through GitLab Duo Agent Platform as part of a broader managed services offering.\n\nBy combining GitLab's DevSecOps platform with MSP-delivered operational expertise, customers can experiment with AI-assisted workflows in a governed environment, meet data residency and compliance requirements, and scale AI adoption across teams without overburdening internal resources.\n\n## Is this right for your business?\n\nThe GitLab MSP Partner Program is a strong fit if you:\n\n* Already deliver managed services in cloud, infrastructure, or application operations  \n* Want to add high-value DevSecOps to your portfolio  \n* Have or want to build technical talent interested in modern development platforms  \n* Prefer long-term customer relationships over one-time transactions\n\nIf you're already a GitLab Select and Professional Services Partner, the MSP program gives you a structured way to turn your existing expertise into a repeatable managed offering.\n\n## Getting started\n\nThe program launches with the **Certified MSP Partner** designation. There's no minimum ARR or customer count required to join. Here's how the path looks:\n\n1. **Confirm fit** - Verify you meet the business and technical requirements outlined in the [handbook page](https://handbook.gitlab.com/handbook/resellers/channel-program-guide/#the-gitlab-managed-service-provider-msp-partner-program).  \n2. **Apply via the GitLab Partner Portal** - Submit your application with business and technical documentation.  \n3. **Complete 90-day onboarding** - A structured onboarding journey covers contracts, technical enablement, sales training, and your first customer engagement.  \n4. **Launch your managed offering** - Package your services, set your SLAs, and begin engaging customers.\n\nCompleted applications are reviewed within approximately three business days.\n\n> Interested in building a GitLab managed services practice? New partners can apply [to become a GitLab Partner](https://about.gitlab.com/partners/). Existing partners can reach out to your GitLab representative to learn more about the program and tell us about the solutions you're currently offering customers through your MSP practice!\n",[548,762,262],{"featured":690,"template":833,"slug":1078},"introducing-the-gitlab-managed-service-provider-msp-partner-program",{"content":1080,"config":1091},{"title":1081,"authors":1082,"date":1086,"body":1087,"category":762,"tags":1088,"description":1089,"heroImage":1090},"DevSecOps-as-a-Service on Oracle Cloud Infrastructure by Data Intensity",[1083,1084,1072,1085],"Biju Thomas","Matt Genelin","Ryan Palmaro","2026-02-10","At GitLab, we know that many organizations choose GitLab Self-Managed for the control, customization, and security it provides. However, managing underlying infrastructure can be a significant operational challenge — especially for teams who want to focus on delivering software, not maintaining platforms.\n\nThat's why we're excited to work with [Oracle Cloud Infrastructure (OCI)](https://www.oracle.com/cloud/) and [Data Intensity](https://www.dataintensity.com/services/security-services/devsecops/), a trusted Oracle managed services provider, to offer a new managed service option, DevSecOps-as-a-Service, that brings together the best of both worlds: the control of GitLab Self-Managed with the operational ease of a fully managed service.\n\n## Why GitLab Self-Managed?\n\nGitLab Self-Managed gives you complete ownership of your DevSecOps platform. 
You control where your data lives, how your instance is configured, and can customize it to meet specific compliance, security, or operational requirements. This level of control is essential for organizations with strict regulatory requirements, data residency needs, or specific integration must-haves.\n\nThe challenge for some customers is that running GitLab Self-Managed means managing servers, handling upgrades, ensuring high availability, and implementing disaster recovery. All of these require specialized expertise and dedicated resources.\n\n## A managed path to GitLab Self-Managed\n\nData Intensity's DevSecOps-as-a-Service on OCI removes these operational burdens while preserving the control benefits of GitLab Self-Managed. Instead of building and maintaining infrastructure yourself, you get a standalone GitLab instance managed by Data Intensity's team of experts, running on OCI's high-performance cloud infrastructure.\n\nHere's what's included:\n\n* Standalone GitLab instance on OCI infrastructure\n* 24x7 monitoring, alarming, and support\n* Quarterly patching scheduled during your chosen maintenance windows\n* Automated backups and disaster recovery protection\n\n## Scaling with your organization\n\nData Intensity’s managed service is designed to grow with your team, offering tiered architectures to match your specific user capacity and recovery requirements:\n\n| **Feature**        | **Standard**    | **Premier**     | **Premier +**   |\n|--------------------|-----------------|-----------------|-----------------|\n| **User Capacity**  | Up to 1,000     | Up to 2,000     | Up to 3,000     |\n| **Performance**    | 20 requests/sec | 40 requests/sec | 60 requests/sec |\n| **Availability**   | 99.9%           | 99.95%          | 99.99%          |\n| **Recovery (RTO)** | 48 hours        | 8 hours         | 4 hours         |\n\nFor more information, visit Data Intensity’s [DevSecOps-as-a-Service](https://www.dataintensity.com/services/security-services/devsecops/) page.\n\n## Why OCI for GitLab?\nOracle Cloud Infrastructure (OCI) provides a robust foundation for running GitLab Self-Managed, offering a secure, high-performance environment at a significantly lower cost than other hyperscalers. Organizations migrating workloads to OCI commonly realize infrastructure cost reductions of 40-50%, making it easier to fund and scale deployments.\n\nOCI supports a wide range of deployment models, from public cloud regions to specialized environments such as Government and EU Sovereign Clouds, as well as dedicated infrastructure deployed behind your firewall. 
These options come with consistent pricing, tooling, and operational experience, enabling teams to standardize GitLab deployments across regulated, hybrid, and global environments.\n\nThe combination of GitLab's comprehensive DevSecOps platform, OCI's high-performance infrastructure, and Data Intensity's managed services expertise provides a turnkey solution that lets your teams focus on what matters: building great software.\n\n## Is this right for your organization?\nConsider Data Intensity's DevSecOps-as-a-Service if you:\n* Want GitLab Self-Managed but need to minimize operational overhead\n* Require specific compliance, security, or data residency requirements\n* Need guaranteed SLAs and professional disaster recovery capabilities\n* Prefer predictable costs and expert management over building in-house infrastructure expertise\n* Are already using or planning to use OCI for your cloud infrastructure\n* Prioritize flexibility and control\n* Want a dedicated instance that’s managed externally but offers the control of a self-managed environment\n\n## Getting started\nOrganizations interested in running GitLab Self-Managed on OCI through Data Intensity's DevSecOps-as-a-Service can contact Data Intensity via the [Data Intensity website](https://www.dataintensity.com/services/security-services/devsecops/) to discuss specific requirements and begin deployment planning.\n\nModernizing your DevSecOps doesn't have to be complex. Data Intensity provides optional migration of code repositories and customizations to ensure a smooth transition to OCI.\n\nAs GitLab continues expanding our partner ecosystem, solutions like this demonstrate our commitment to giving organizations choice in how they deploy and manage GitLab — whether that's SaaS, self-managed, or managed services through trusted partners.",[262,516],"Run GitLab Self-Managed with minimal overhead. Data Intensity delivers DevSecOps-as-a-Service on OCI with expert management and disaster recovery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098794/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%289%29_DoeBNJVrhv9FpF3WCsHNc_1750098793762.png",{"featured":13,"template":833,"slug":1092},"devsecops-as-a-service-on-oracle-cloud-infrastructure-by-data-intensity",{"category":772,"slug":774,"posts":1094},[1095,1106,1118],{"content":1096,"config":1104},{"title":1097,"description":1098,"authors":1099,"heroImage":870,"date":1101,"body":1102,"category":774,"tags":1103},"GitLab AI Hackathon 2026: Meet the winners","Nearly 7,000 developers built 600+ AI agents and flows on GitLab Duo Agent Platform. Find out who won and what they created.",[1100],"Nick Veenhof","2026-04-22","AI writes code. That is expected now. But planning, security, compliance, and deployments? Those gaps remain. I have run contributor programs for years. I have never seen a community respond to technology like this.\n\nThat is why we opened [GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo-agent-platform/) and invited developers worldwide to build AI agents that help teams ship secure software faster. Not chatbots that answer questions, but agents that jump into workflows, respond to events, and act on your behalf. The GitLab AI Hackathon ran from February 9 to March 25, 2026, on Devpost, the hackathon platform. 
Google Cloud and Anthropic joined as co-sponsors.\n\nWhen my team planned this hackathon with Google Cloud and Anthropic, I asked the judges to score four things: technical work, design, potential impact, and idea quality. We hoped for strong turnout. What we got surprised all of us. Nineteen judges spent 18 days reviewing every entry. Google Cloud and Anthropic provided judges, prizes, and cloud access. The community built hundreds of agents and flows because they wanted to solve these problems.\n\nNearly 7,000 developers showed up. They built 600+ agents and flows in weeks. The prizes across all categories totaled $65,000 from GitLab, Google Cloud, and Anthropic.\n\n\nIf you have ever watched a senior engineer leave and take half the team's knowledge with them, you know why the winning project hit so hard.\n\nRead on to find out what the community built.\n\n## Grand Prize: LORE\n\n[LORE](https://devpost.com/software/lore-living-organizational-record-engine), the Living Organizational Record Engine, uses eight agents with a router that sends each question to the right agent, logic to prevent circular loops in the knowledge graph, a visual dashboard, and carbon tracking. The command-line tool ships with 43 tests (yes, 43 tests in a hackathon project).\n\nLORE solves a real problem: the knowledge that lives in engineers' heads and walks out the door when they leave. In my experience, a hackathon project with 43 tests is rare. That many tests in a hackathon project tells you something about the team behind it.\n\nJudge April Guo (Anthropic) wrote: \"This feels like a product, not a hackathon project.\"\n\n\n### Google Cloud winners\n\n[Gitdefender](https://devpost.com/software/gitdefender) won the Google Cloud Grand Prize. It works inside code review workflows, finding and fixing security issues. It spots the bug, writes the fix, and opens the code review. No developer needs to step in.\n\n[Aegis](https://devpost.com/software/aegis-2m1oq0) won the Google Cloud Runner Up. It gives AI-powered explanations for every decision it makes, deployed to Google Cloud and ready for production use.\n\n### Anthropic winners\n\n[GraphDev](https://devpost.com/software/graphdev) won the Anthropic Grand Prize. It maps code links and shows how systems change over time. Judge Aboobacker MK (GitLab) noted it was \"in sync with our work on GitLab knowledge graph.\" Judge Ayush Billore (GitLab) wrote: \"Loved the demo and UX, super useful for understanding how the system evolved and what gets impacted by changes.\" You can see the full impact of a change before you make it.\n\n[DocSync](https://devpost.com/software/pipeheal) won the Anthropic Runner Up. It uses three agents: Detector, Writer, and Reviewer. If DocSync is confident in the fix, it opens a code review. If not, it creates an issue for a human to check.\n\n## Category winners\n\n### Most Technically Impressive\n\nDatabase migrations break things. [Time-Traveler](https://devpost.com/software/time-traveler-w3cxp0) creates a safe copy of your production setup, runs the migration against that copy, and reports the result. It runs five agents connected by a bridge, with real Google Cloud deployment, real PostgreSQL migrations, and real data.\n\n### Most Impactful\n\n[RedAgent](https://devpost.com/software/redagent) checks AI-generated security reports, closing the trust gap between AI findings and developer action. If your team uses AI for security scanning, you know this problem. I have seen teams dismiss AI findings because they could not verify them. 
RedAgent gives teams a way to check AI output before it reaches developers.\n\n### Easiest to Use\n\n[Launch Control](https://devpost.com/software/launch-control-bgp8az) delivers polished UX and solid infrastructure, and scored well on sustainability too.\n\n## The sustainability signal\n\nFive projects won prizes or bonuses for environmental impact. Software delivery already has a carbon cost from CI/CD pipelines running compute at scale, and now LLMs add to it. We created the Green Agent category to challenge developers to measure and reduce that footprint. Stacy Cline and Kim Buncle from GitLab's sustainability team helped judge the Green Agent category. \n\n### Green Agent prize\n\n[GreenPipe](https://devpost.com/software/greenpipe) scans CI/CD pipelines for environmental impact and produces carbon footprint reports. Judges Kim Buncle and Rajesh Agadi (Google) both backed the project.\n\n### Sustainable Design bonus\n\nSustainable Design bonuses were awarded to the projects with exceptional sustainability practices in their design, from model optimization techniques to energy-efficient architecture choices.\n\n* [BugFlow](https://devpost.com/software/bugflow-ai-regression-detective-ci-optimizer) turned one bug report into 10 fixes in 20 minutes. \n* [DELTA Cyber Reasoning](https://devpost.com/software/delta-cyber-reasoning-system) is automated fuzz testing for security. \n* [CarbonLint](https://devpost.com/software/carbonlint) applied code analysis to energy use.\n* [TFGuardian](https://devpost.com/software/tfguardian) features a carbon footprint analyzer, among other agents.\n\nCongratulations to all the Sustainable Design bonus winners! \n\nJudge Jens-Joris Decorte (TechWolf) cited the result: Costs dropped from $556 to $18 per month, a 96% carbon cut (that is a $538 monthly saving with a sustainability label on it).\n\n## Honorable mentions and the long tail\n\nSix projects received honorable mentions:\n\n- [SecurityMonkey](https://devpost.com/software/securitymonkey) injects known vulnerabilities into a test branch and scores how well your security scanners catch them.\n- [stregent](https://devpost.com/software/stregent) monitors CI/CD pipelines and lets developers investigate and merge fixes from WhatsApp without opening a laptop.\n- [Compliance Sentinel](https://devpost.com/software/compliance-sentinel-autonomous-devsecops-governance) scores every merge request for compliance risk and blocks the merge if critical violations are detected.\n- [Carbon Tracker](https://devpost.com/software/carbon-tracker-ij25kf) calculates the carbon footprint of each CI/CD pipeline job and posts optimization tips on the merge request.\n- [RepoWarden](https://devpost.com/software/docuguard) is the first Living Specification Engine, an AI system that captures why code was written, not just what it does.\n- [MR Compliance Auditor](https://devpost.com/software/mr-compliance-auditor) collects evidence across merge requests, maps it to SOC 2 controls, and streams compliance scores to a live dashboard.\n\nMy favorite quote from the judging came from Luca Chun Lun Lit (Anthropic), who described stregent's mobile-first approach: \"Being able to essentially code from your phone is a next level in the engineering experience.\"\n\n> Explore the 600+ entries in the [project gallery](https://gitlab.devpost.com/project-gallery).\n\n## What comes next\n\nEvery agent in this hackathon worked within a single project. They still delivered impressive results. 
Some participants ran a local knowledge graph alongside their agents to surface code relationships and dependencies within the repo. LORE captures project history. Gitdefender finds vulnerabilities. Pairing agents with richer local context is already helping contributors build sharper tools. The next hackathon will build on what contributors are already doing with richer context. Sign up on [contributors.gitlab.com](https://contributors.gitlab.com/) to be the first to know when details drop.\n\n\n## Get started\n\nA special thanks to Lee Tickett (GitLab) and Mattias Michaux (GitLab) for orchestrating the orchestrators and innovators behind this hackathon!\n\nThank you to every developer who submitted. Nearly 7,000 of you showed what GitLab Duo Agent Platform can do when a community decides to build. I am proud of what you built here, and I cannot wait to see what you build next.\n\nBuild your own agent on [GitLab Duo Agent Platform](https://docs.gitlab.com/user/duo_agent_platform/). Browse community-built agents in the [AI Catalog](https://docs.gitlab.com/user/duo_agent_platform/ai_catalog/). You orchestrate. AI accelerates.\n",[704,247],{"featured":690,"template":833,"slug":1105},"gitlab-ai-hackathon-2026-meet-the-winners",{"content":1107,"config":1116},{"title":1108,"description":1109,"authors":1110,"heroImage":1112,"date":895,"category":774,"tags":1113,"body":1115},"What’s new in Git 2.54.0?","Learn about release contributions, including new repository maintenance, a new command to edit commit history, a replacement for git-sizer(1), and more.",[1111],"Patrick Steinhardt","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776711651/sj7xxyyuimlarswbyft5.png",[951,1114,247],"git","The Git project recently released [Git 2.54.0](https://lore.kernel.org/git/xmqqa4uxsjrs.fsf@gitster.g/T/#u). Let's look at a few notable highlights from this release, which includes contributions from the Git team at GitLab.\n\n## Pluggable Object Databases\n\nGit already has the ability to store references with either the \"files\" backend or with the [\"reftable\" backend](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/). This is achieved by having proper abstractions in Git that allows us to have different backends.\n\nBut references are just one of the two important types of data that are stored in repositories, with the other being objects. Objects are stored in the object database, and each object database in turn consists of multiple object sources where objects can be read from or written to. Each object source either stores individual objects as so-called \"loose\" objects, or compresses multiple objects into a \"packfile\" in your `.git/objects` directory.\n\nUntil now, however, these sources did not have a proper abstraction boundary, so the storage format for objects is completely hardcoded into Git. But this is finally changing with pluggable object databases! The concept is straightforward and similar to how we did this for references in the past: Instead of having hardcoded code paths for how to store objects, we introduce an abstraction boundary that allows us to have different backends for storing objects.\n\nWhile the idea is simple, the implementation is not, as we have hardcoded assumptions about the storage formats used in Git all over the place. In fact, we have started working on this topic in Git 2.48, which was released in January 2025. 
Initially, we focused on making object-related subsystems self-contained and creating proper subsystems for the existing backends that we had in Git.\n\nWith Git 2.54, we have now reached a milestone: The object database backend is now pluggable. Not all of Git's functionality is covered yet, but introducing an alternate backend that handles a meaningful subset of operations is now a realistic undertaking.\n\nFor now, only local workflows like creating commits, showing commit graphs, or performing merges will work with such an alternative implementation. This notably excludes anything that interacts with a remote, such as when you want to fetch or push changes. Regardless, this is the culmination of almost two years of work spanning across almost 400 commits that have been merged upstream, and we will of course continue to iterate on this effort.\n\nSo why does this matter? The idea is that it becomes practical to introduce new storage formats into Git. Examples could be:\n- A storage format that is able to store large binary files more efficiently\n  than packfiles do today\n\n- A storage format that is custom-tailored for GitLab to ensure that we can\n  serve repositories to our users even more efficiently than we currently can\n\n\nThis is a large-scale effort that is likely to shape the future of Git and GitLab.\n\n*This project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).*\n\n## Easier editing of your commit history\n\nIn many software development projects it is common practice for developers to not only polish the code they want to contribute, but to also polish the commit history so that it becomes easy to review. The result is a set of small and atomic commits that each do one thing, with a good commit message that describes the intent of the commit as well as specific nuances.\n\nOf course, more often than not, these atomic commits are not something that just happens naturally during the development process. Instead, the author of the changes will gain a better understanding of what they are while iterating on them, and the way to split up the commits will become clearer over time. Furthermore, the subsequent review process may result in feedback that requires changes to the crafted commits.\n\nThe consequence of this process is that the developer will have to rewrite their commit history many times during the development process. Historically, Git has allowed for this use case via [interactive rebases](https://git-scm.com/docs/git-rebase#_interactive_mode). These interactive rebases are an extremely powerful tool: They let you reorder commits, rewrite commit messages, squash multiple commits together, or perform arbitrary edits of any commit.\n\nBut they are also somewhat arcane and hard to understand. The user needs to figure out the base commit for the rebase, they need to understand how to edit a somewhat obscure \"instruction sheet,\" and they need to be aware of how the stateful rebasing process works. 
For example, users are presented with an instruction sheet similar to the following when rebasing a topic branch:\n\n```shell\npick b60623f382 # t: detect errors outside of test cases # empty\npick b80cb55882 # t: prepare `test_match_signal ()` calls for `set -e`\npick 5ffe397f30 # t: prepare `test_must_fail ()` for `set -e`\npick 5e9b0cf5e1 # t: prepare `stop_git_daemon ()` for `set -e`\npick 299561e7a2 # t: prepare `git config --unset` calls for `set -e`\npick ed0e7ca2b5 # t: detect errors outside of test cases\n```\n\nSo while interactive rebases are powerful, they are also quite intimidating for the average user.\n\nIt doesn't have to be this way, though. Tools like [Jujutsu](https://www.jj-vcs.dev/latest/) provide interfaces that are much easier to use compared to Git, as you can for example simply execute `jj split` to split up a commit into two commits. With Git and interactive rebases, this use case requires a lot of different steps with confusing command line arguments.\n\nWe have thus taken inspiration from Jujutsu and have introduced a new git-history(1) command into Git that is the foundation for better history editing. For now, this command has two subcommands:\n\n- `git history reword` allows you to easily rewrite a commit message. You simply\n  give it the commit whose message you want to reword, Git asks you for the new\n  commit message, and that's it.\n\n- `git history split` allows you to split up a commit into two, which is\n  inspired by `jj split`. You give it a commit, Git asks you which changes to\n  stage into which commit and for the two commit messages, and then you're done.\n\n\nThis is of course only a start, and we want to add additional subcommands over time. For example:\n\n- `git history fixup` to take staged changes and automatically amend them to a\n  specific commit\n\n- `git history drop` to remove a commit\n- `git history reorder` to reorder the sequence of commits\n- `git history squash` to squash a range of commits\n\nBut that's not all! In addition to making history editing easy, this new command also knows to automatically rebase all of your local branches that previously included this commit. So that means that you can even edit a commit that is not on the current branch, and all branches that contain the commit will be rewritten.\n\nIt may seem puzzling at first that Git is automatically rebasing dependent branches, as that is a significant diversion from how git-rebase(1) works. But this is part of a bigger effort to bring better support for Stacked Diffs to Git, which are a way to create a series of multiple dependent branches that can be reviewed independently, but that together work towards a bigger goal.\n\n*This project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab) with support from [Elijah Newren](https://github.com/newren).*\n\n## A native replacement for git-sizer(1)\n\nThe size of a Git repository is an important factor that determines how well Git and GitLab can handle it. 
But size alone is not the only factor, as the performance of a repository is ultimately a combination of multiple different dimensions:\n\n- The depth of the commit history\n- The shape of the directory structure\n- The size of files stored in the repository\n- The number of references\n\nThese are only some of the dimensions one needs to consider when trying to predict whether Git will be able to handle a repository well.\n\nBut while it is clear that the mere repository size is insufficient, Git itself does not provide any tooling that gives the user an easy overview of these metrics. Instead, users are forced to rely on third-party tools like [git-sizer(1)](https://github.com/github/git-sizer) to fill this gap. This tool does an excellent job at surfacing this information, but it is not part of Git itself and thus needs to be installed separately.\n\nObservability of repository internals is critical to us at GitLab, so we introduced a [new `git repo structure` command into Git 2.52](https://about.gitlab.com/blog/whats-new-in-git-2-52-0/#new-subcommand-for-git-repo1-to-display-repository-metrics) to display repository metrics, which we have extended in Git 2.53 to [show inflated and disk sizes for objects by type](https://about.gitlab.com/blog/whats-new-in-git-2-53-0/#more-data-collected-in-git-repo-structure).\n\nIn Git 2.54, we are now iterating some more on this command so that we don't only show the overall size, but also show the largest objects by type:\n\n```shell\n$ git clone https://gitlab.com/git-scm/git.git\n$ cd git\n$ git repo structure\nCounting objects: 410445, done.\n| Repository structure      | Value       |\n| ------------------------- | ----------- |\n| * References              |             |\n|   * Count                 |    1.01 k   |\n|     * Branches            |       1     |\n|     * Tags                |    1.00 k   |\n|     * Remotes             |       9     |\n|     * Others              |       0     |\n|                           |             |\n| * Reachable objects       |             |\n|   * Count                 |  410.45 k   |\n|     * Commits             |   83.99 k   |\n|     * Trees               |  164.46 k   |\n|     * Blobs               |  161.00 k   |\n|     * Tags                |    1.00 k   |\n|   * Inflated size         |    7.46 GiB |\n|     * Commits             |   57.53 MiB |\n|     * Trees               |    2.33 GiB |\n|     * Blobs               |    5.07 GiB |\n|     * Tags                |  737.48 KiB |\n|   * Disk size             |  181.37 MiB |\n|     * Commits             |   33.11 MiB |\n|     * Trees               |   40.58 MiB |\n|     * Blobs               |  107.11 MiB |\n|     * Tags                |  582.67 KiB |\n|                           |             |\n| * Largest objects         |             |\n|   * Commits               |             |\n|     * Maximum size    [1] |   17.23 KiB |\n|     * Maximum parents [2] |      10     |\n|   * Trees                 |             |\n|     * Maximum size    [3] |   58.85 KiB |\n|     * Maximum entries [4] |    1.18 k   |\n|   * Blobs                 |             |\n|     * Maximum size    [5] | 1019.51 KiB |\n|   * Tags                  |             |\n\n|     * Maximum size    [6] |    7.13 KiB |\n\n[1] f6ecb603ff8af608a417d7724727d6bc3a9dbfdf\n[2] 16d7601e176cd53f3c2f02367698d06b85e08879\n[3] 203ee97047731b9fd3ad220faa607b6677861a0d\n[4] 203ee97047731b9fd3ad220faa607b6677861a0d\n[5] aa96f8bc361fd84a1459440f1e7de02ab0dc3543\n[6] 
07e38db6a5a03690034d27104401f6c8ea40f1fc\n```\n\nWith this information we're now almost feature-complete as compared to git-sizer(1). We're not done yet, though — we plan to eventually add additional features such as:\n\n- Severity levels as they exist in git-sizer(1)\n- Graphs that show you the distribution of object sizes\n- The ability to scan objects reachable via a subset of references\n\n*This project was led by [Justin Tobler](https://gitlab.com/justintobler).*\n\n## New infrastructure for repository maintenance\n\nWhenever you write data into a Git repository you will typically end up adding more loose objects. Left unmanaged, this leads to a large number of separate files in your `.git/objects/` directory, which slows down several operations that want to access many objects at once. Git thus regularly packs these objects into \"packfiles\" to ensure good performance.\n\nThis isn't the only data structure that may become inefficient over time: Updating references may create loose references, reflogs will need trimming, worktrees may become stale, and caches like commit-graphs need to be refreshed regularly.\n\nAll of these tasks have historically been managed by [git-gc(1)](https://git-scm.com/docs/git-gc). However, this tool has a monolithic architecture, where it basically executes all of the tasks required in sequential order. This foundation is hard to extend and doesn't give the end user much flexibility in case they want to slightly modify how housekeeping is performed.\n\nThe Git project introduced the new [git-maintenance(1)](https://git-scm.com/docs/git-maintenance) tool in Git 2.29. In contrast to git-gc(1), git-maintenance(1) is not monolithic but is instead structured around tasks. These tasks are freely configurable by the user so that the user can control which tasks are running, giving them much more fine-grained control over repository maintenance.\n\nEventually, Git has migrated to use git-maintenance(1) by default. But in the beginning, the only task that was default-enabled was the git-gc(1) task, which as you might have guessed, simply executes `git gc`. To manually run maintenance using this new command you can execute `git maintenance run`, but Git knows to execute this automatically after several other commands.\n\nOver the last couple releases we have implemented all the individual tasks that are supported by git-gc(1) in git-maintenance(1) to ensure that we have feature parity between these two tools.\n\nFurthermore, we have implemented a new task that uses Git's modern architecture for repacking objects with [geometric compaction](https://git-scm.com/docs/git-repack#Documentation/git-repack.txt---geometricfactor).\nGeometric compaction is a much better fit for large monorepos, and with our efforts to make them work well with partial clones [that landed in Git 2.53](https://about.gitlab.com/blog/whats-new-in-git-2-53-0/#geometric-repacking-support-with-promisor-remotes) they are now a full replacement for our previous repacking strategy in Git.\n\nIn Git 2.54, we have now reached another significant milestone: Instead of using the git-gc(1)-based strategy by default, we are now using geometric repacking with fine-grained individual maintenance tasks! 
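\n\nIf you want to experiment with this on your own machine, the existing git-maintenance(1) interface is the place to start. A small sketch, with task names taken from the git-maintenance(1) documentation (which tasks are enabled by default depends on your Git version):\n\n```shell\n# Run maintenance once, on demand, in the current repository\ngit maintenance run\n\n# Run a single task explicitly\ngit maintenance run --task=incremental-repack\n\n# Enable or disable individual tasks for this repository\ngit config maintenance.commit-graph.enabled true\ngit config maintenance.loose-objects.enabled true\n\n# Register the repository for scheduled background maintenance\ngit maintenance start\n```\n\n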
Besides being more efficient for large monorepos, it also ensures that we have an easier foundation to iterate on going forward.\n\n*The git-maintenance(1) infrastructure was originally implemented by [Derrick Stolee](https://github.com/derrickstolee) and geometric maintenance was introduced by [Taylor Blau](https://github.com/ttaylorr). The effort to introduce the new fine-grained tasks and migrate to the new maintenance strategy was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).*\n\n## Read more\n\nThis article highlighted just a few of the contributions made by GitLab and the wider Git community for this latest release. You can learn about these from the [official release announcement](https://lore.kernel.org/git/xmqqa4uxsjrs.fsf@gitster.g/T/#u) of the Git project. Also, check out our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/) to see other past highlights of contributions from GitLab team members.",{"slug":1117,"featured":690,"template":833},"whats-new-in-git-2-54-0",{"content":1119,"config":1128},{"title":1120,"description":1121,"authors":1122,"date":1124,"body":1125,"heroImage":1126,"category":774,"tags":1127},"What’s new in Git 2.53.0?","Learn about release contributions, including fixes for geometric repacking, updates to git-fast-import(1) commit signature handing options, and more.",[1123],"Justin Tobler","2026-02-02","The Git project recently released [Git 2.53.0](https://lore.kernel.org/git/xmqq4inz13e3.fsf@gitster.g/T/#u). Let's look at a few notable highlights from this release, which includes\ncontributions from the Git team at GitLab.\n\n## Geometric repacking support with promisor remotes\n\nNewly written objects in a Git repository are often stored as individual loose files. To ensure good performance and optimal use of disk space, these loose objects are regularly compressed into so-called packfiles. The number of packfiles in a repository grows over time as a result of the user’s activities, like writing new commits or fetching from a remote. As the number of packfiles in a repository increases, Git has to do more work to look up individual objects. Therefore, to preserve optimal repository performance, packfiles are periodically repacked via git-repack(1) to consolidate the objects into fewer packfiles. When repacking there are two strategies: “all-into-one” and “geometric”.\n\nThe all-into-one strategy is fairly straightforward and the current default. As its name implies, all objects in the repository are packed into a single packfile. From a performance perspective this is great for the repository as Git only has to scan through a single packfile when looking up objects. The main downside of such a repacking strategy is that computing a single packfile for a repository can take a significant amount of time for large repositories.\n\nThe geometric strategy helps mitigate this concern by maintaining a geometric progression of packfiles based on their size instead of always repacking into a single packfile. To explain more plainly, when repacking Git maintains a set of packfiles ordered by size where each packfile in the sequence is expected to be at least twice the size of the preceding packfile. If a packfile in the sequence violates this property, packfiles are combined as needed until the progression is restored. 
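\n\nAs a toy illustration of that rule (the numbers are made up): with a factor of two, packs holding roughly 500, 600, 2,500, and 6,000 objects violate the progression, because 600 is less than twice 500. A geometric repack rolls the two small packs into one of about 1,100 objects and leaves the 2,500 and 6,000 object packs untouched, since they already satisfy the factor. On the command line, this strategy is exposed as:\n\n```shell\n# Repack while maintaining a geometric progression with factor 2,\n# deleting packs that become redundant\ngit repack --geometric=2 -d\n```\n\n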
This strategy has the advantage of still minimizing the number of packfiles in a repository while also minimizing the amount of work that must be done for most repacking operations.\n\nOne problem with the geometric repacking strategy was that it was not compatible with partial clones. Partial clones allow the user to clone only parts of a repository by, for example, skipping all blobs larger than 1 megabyte. This can significantly reduce the size of a repository, and Git knows how to backfill missing objects that it needs to access at a later point in time.\n\nThe result is a repository that is missing some objects, and any object that may not be fully connected is stored in a “promisor” packfile. When repacking, this promisor property needs to be retained going forward for packfiles containing a promisor object so it is known whether a missing object is expected and can be backfilled from the promisor remote. With an all-into-one repack, Git knows how to handle promisor objects properly and stores them in a separate promisor packfile. Unfortunately, the geometric repacking strategy did not know to give special treatment to promisor packfiles and instead would merge them with normal packfiles without considering whether they reference promisor objects. Luckily, due to a bug, the underlying git-pack-objects(1) dies when geometric repacking is attempted in a partial clone repository. As a result, repositories in this configuration could not be repacked geometrically at all, which isn’t great, but is better than repository corruption.\n\nWith the release of Git 2.53, geometric repacking now works with partial clone repositories. When performing a geometric repack, promisor packfiles are handled separately in order to preserve the promisor marker and repacked following a separate geometric progression. With this fix, the geometric strategy moves closer towards becoming the default repacking strategy. For more information, check out the corresponding [mailing list thread](https://lore.kernel.org/git/20260105-pks-geometric-repack-with-promisors-v1-0-c4660573437e@pks.im/).\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n## git-fast-import(1) learned to preserve only valid signatures\n\nIn our [Git 2.52 release article](https://about.gitlab.com/blog/whats-new-in-git-2-52-0/), we covered signature-related improvements to git-fast-import(1) and git-fast-export(1). Be sure to check out that post for a more detailed explanation of these commands, how they are used, and the changes being made with regard to signatures.\n\nTo quickly recap, git-fast-import(1) provides a backend to efficiently import data into a repository and is used by tools such as [git-filter-repo(1)](https://github.com/newren/git-filter-repo) to help rewrite the history of a repository in bulk. In the Git 2.52 release, git-fast-import(1) learned the `--signed-commits=\u003Cmode>` option, similar to the same option in git-fast-export(1). With this option, it became possible to unconditionally retain or strip signatures from commits/tags.\n\nIn situations where only part of the repository history has been rewritten, any signature for rewritten commits/tags becomes invalid. This means git-fast-import(1) is limited to either stripping all signatures or keeping all signatures even if they have become invalid. But retaining invalid signatures doesn’t make much sense, so rewriting history with git-filter-repo(1) results in all signatures being stripped, even if the underlying commit/tag is not rewritten. 
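To sketch that all-or-nothing choice, consider a history export piped through a rewrite and back into a new repository (the `--signed-commits` values shown here are illustrative; check git-fast-export(1) and git-fast-import(1) for the modes your Git version actually supports):

```shell
# Export the history with commit signatures preserved in the stream...
git fast-export --signed-commits=verbatim --all > history.stream

# ...then, before Git 2.53, the importer could only keep every signature
# (even ones invalidated by a rewrite) or drop every signature:
git fast-import --signed-commits=strip < history.stream
```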
This is unfortunate because if the commit/tag is unchanged, its signature is still valid and thus there is no real reason to strip it. What is really needed is a means to preserve signatures for unchanged objects, but strip invalid ones.\n\nWith the release of Git 2.53, the git-fast-import(1) `--signed-commits=\u003Cmode>` option has learned a new `strip-if-invalid` mode which, when specified, only strips signatures from commits that become invalid due to being rewritten. Thus, with this option it becomes possible to preserve some commit signatures when using git-fast-import(1). This is a critical step towards providing the foundation for tools like git-repo-filter(1) to preserve valid signatures and eventually re-sign invalid signatures.\n\nThis project was led by [Christian Couder](https://gitlab.com/chriscool).\n\n## More data collected in git-repo-structure\n\nIn the Git 2.52 release, the “structure” subcommand was introduced to git-repo(1). The intent of this command was to collect information about the repository and eventually become a native replacement for tools such as [git-sizer(1)](https://github.com/github/git-sizer). At GitLab, we host some extremely large repositories, and having insight into the general structure of a repository is critical to understand its performance characteristics. In this release, the command now also collects total size information for reachable objects in a repository to help understand the overall size of the repository. In the output below, you can see the command now collects both the total inflated and disk sizes of reachable objects by object type.\n\n```shell\n$ git repo structure\n\n| Repository structure | Value      |\n| -------------------- | ---------- |\n| * References         |            |\n|   * Count            |   1.78 k   |\n|     * Branches       |      5     |\n|     * Tags           |   1.03 k   |\n|     * Remotes        |    749     |\n|     * Others         |      0     |\n|                      |            |\n| * Reachable objects  |            |\n|   * Count            | 421.37 k   |\n|     * Commits        |  88.03 k   |\n|     * Trees          | 169.95 k   |\n|     * Blobs          | 162.40 k   |\n|     * Tags           |    994     |\n|   * Inflated size    |   7.61 GiB |\n|     * Commits        |  60.95 MiB |\n|     * Trees          |   2.44 GiB |\n|     * Blobs          |   5.11 GiB |\n|     * Tags           | 731.73 KiB |\n|   * Disk size        | 301.50 MiB |\n|     * Commits        |  33.57 MiB |\n|     * Trees          |  77.92 MiB |\n|     * Blobs          | 189.44 MiB |\n|     * Tags           | 578.13 KiB |\n```\n\nThe keen-eyed among you may have also noticed that the size values in the table output are also now listed in a more human-friendly manner with units appended. In subsequent releases we hope to further expand this command's output to provide additional data points such as the largest individual objects in the repository.\n\nThis project was led by [Justin Tobler](https://gitlab.com/justintobler).\n\n## Read more\n\nThis article highlighted just a few of the contributions made by GitLab and\nthe wider Git community for this latest release. You can learn about these from\nthe [official release announcement](https://lore.kernel.org/git/xmqq4inz13e3.fsf@gitster.g/T/#u) of the Git project. 
Also, check\nout our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/)\nto see other past highlights of contributions from GitLab team members.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663087/Blog/Hero%20Images/git3-cover.png",[951,1114,247],{"featured":13,"template":833,"slug":1129},"whats-new-in-git-2-53-0",{"category":71,"slug":785,"posts":1131},[1132,1142,1155],{"content":1133,"config":1140},{"title":1134,"description":1135,"heroImage":1136,"date":1101,"category":785,"tags":1137},"GitLab Patch Release: 18.11.1, 18.10.4, 18.9.6","Discover what's in this latest patch release.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661926/Blog/Hero%20Images/security-patch-blog-image-r2-0506-700x400-fy25_2x.jpg",[1138,1139],"patch releases","security releases",{"featured":690,"template":833,"externalUrl":1141},"https://docs.gitlab.com/releases/patches/patch-release-gitlab-18-11-1-released/",{"content":1143,"config":1153},{"title":1144,"description":1145,"body":1146,"category":785,"tags":1147,"date":1149,"authors":1150,"heroImage":906},"GitLab + Amazon: Platform orchestration on a trusted AI foundation","Pair GitLab Duo Agent Platform with Amazon Bedrock for agentic software development and orchestration.","If your team runs GitLab and has a strong AWS practice, a new combination of Duo Agent Platform and Amazon Bedrock is just for you. The model is simple: GitLab acts as your orchestration layer to help accelerate your entire software lifecycle with agentic AI, and Bedrock is designed to provide a secure, compliant foundation model layer with AI inference behind the scenes.\n\nGitLab Duo Agent Platform enables you to handle planning, merge pipelines, security scanning, vulnerability remediation, and more as part of your GitLab workflows, while the GitLab AI Gateway routes model calls to Bedrock (or GitLab-managed Bedrock-backed endpoints, depending on your setup). That means you can build on the identity and access management (IAM) policies, virtual private cloud (VPC) boundaries, regional controls, and cloud spend commitments you already have in AWS.\n\nIf you already use Amazon Bedrock and want AI to help inside the work you already do in GitLab, not in yet another standalone chat tool, this is the pairing for you.\n\n\nIn this article, we look at the real problem many teams face today: AI is fragmented, data paths are fuzzy, and Bedrock investment gets underused when AI sits outside the software development lifecycle. Then we break down your deployment options for GitLab Duo Agent Platform:\n\n* Integrated with self-hosted models on Amazon Bedrock for GitLab Self-Managed deployments and self-hosted AI gateway   \n* Integrated with GitLab-operated models on Amazon Bedrock (with GitLab-owned keys) for GitLab Self-Managed deployments and GitLab-hosted AI gateway  \n* Integrated with GitLab-operated models on Amazon Bedrock (with GitLab-owned keys) for GitLab.com instances and GitLab-hosted AI gateway\n\nWe wrap with a summary on how this approach helps avoid shadow AI and point-tool sprawl without creating a parallel tech stack for AI tooling.\n\n## AI everywhere, control nowhere\n\nSomewhere in your company right now, software teams might be using an AI tool that your security team hasn't approved. Prompt data might be leaving your environment through a path no one has fully mapped. 
And your organization’s Amazon Bedrock investment might be underused while individual teams expense separate AI tools, pulling workloads and cloud spend away from the platforms you’ve already committed to.\n\nInstead of being a people problem, this might be an architecture problem. And it surfaces the same three constraints in nearly every enterprise:\n\n**Operational fragmentation.** Each team, or sometimes even an individual developer, picks their own development toolset, including AI tooling and model selection. That fragmentation makes end-to-end governance within the software development lifecycle nearly impossible.\n\n**Security and sovereignty.** Where does prompt and code data actually flow? Who owns the logs?\n\n**Cloud spend optimization.** Commitments to key cloud providers like AWS are diluted as workloads and AI usage drift to point tools outside of customers’ existing agreements.\n\nGitLab Duo Agent Platform and Amazon Bedrock help solve this together. The division of labor is straightforward: Duo Agent Platform owns the workflow orchestration with agentic AI for software development, Bedrock owns the inference layer and hosts approved foundational models, and your organization has full control over the data and policy boundaries you already defined in AWS. Three jobs, three owners, no fragmentation.\n\n## GitLab Duo Agent Platform: The agentic control plane\n\nGitLab Duo Agent Platform is GitLab's agentic AI layer: a framework of specialized agents and flows that operate simultaneously and in-parallel, going beyond the traditional stage-based handoffs  and helping automate work across the entire software lifecycle. Rather than a single assistant responding to prompts, Duo Agent Platform enables teams to orchestrate many AI agents asynchronously using unified data and project context, including issues, merge requests, pipelines, and security findings. Linear workflows are turned into coordinated, continuous collaboration between software teams and their AI agents, at scale.\n\nWith that control plane in place, the natural next question is which AI foundation should power these agents. For customers who run GitLab Self-Managed on AWS and need inference traffic, prompt data, and logs to also stay within their AWS environment along with their software lifecycle data, Amazon Bedrock acting as the AI inference layer is the natural fit. \n\n## Amazon Bedrock: The trusted AI foundation\n\nAmazon Bedrock is a fully managed, serverless foundation model layer that runs entirely within your AWS environment. Customer data stays in the customer's AWS account: inputs and outputs are encrypted in transit and at rest, never shared with model providers, and never used to train base models. Bedrock carries compliance certifications across GDPR, HIPAA, and FedRAMP High, covering many regulated industry requirements out of the box. Teams can also bring fine-tuned models from elsewhere via Custom Model Import and deploy them alongside native Bedrock models through the same infrastructure, without managing separate deployment pipelines. 
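Concretely, "the inference layer stays in your AWS environment" means models are enabled and invoked with the credentials, regions, and IAM policies you already manage. A minimal sketch using the AWS CLI (the model ID and region are examples, not recommendations):

```shell
# List the foundation models enabled in your own account and region
aws bedrock list-foundation-models --region us-east-1 \
  --query 'modelSummaries[].modelId'

# Invoke a model through the same IAM, VPC, and logging controls as any other AWS call
aws bedrock-runtime invoke-model \
  --region us-east-1 \
  --model-id anthropic.claude-3-5-sonnet-20240620-v1:0 \
  --cli-binary-format raw-in-base64-out \
  --body '{"anthropic_version":"bedrock-2023-05-31","max_tokens":256,"messages":[{"role":"user","content":"Summarize this merge request description."}]}' \
  response.json
```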
Bedrock Guardrails adds configurable safeguards across all models for content filtering, hallucination detection, and sensitive data protection.\n\nTogether, GitLab Duo Agent Platform and Bedrock consolidate DevSecOps orchestration and AI model governance, helping eliminate the fragmentation that happens when teams roll out AI tools independently.\n\n## Choosing your deployment path\n\nThe integration delivers the same core GitLab Duo Agent Platform capabilities regardless of how it is deployed. What varies is who runs GitLab, who operates the AI Gateway, and whose Bedrock account the inference runs through. The right pattern depends on where your organization already operates.\n\nAt a high level, the integration has three main components:\n\n* **GitLab Duo Agent Platform:** agentic workflows embedded across the software development lifecycle  \n* **AI Gateway (GitLab-managed or self-hosted):** the abstraction layer between Duo Agent Platform and the foundational model backend   \n* **Amazon Bedrock:** the AI model and inference substrate\n\n![Deployment of GitLab and AWS Bedrock](https://res.cloudinary.com/about-gitlab-com/image/upload/v1776362365/udmvmv2efpmwtkxgydch.png)\n\nChoosing a deployment pattern is informed by where an organization wants to place the levers of control. The patterns below are designed to meet teams where they already are, whether that's SaaS-first, self-managed for compliance, or all-in on AWS with existing Bedrock investments.\n\n| Deployment Model | GitLab.com instance with GitLab-hosted AI Gateway with GitLab-operated Bedrock models   | GitLab Self-Managed with GitLab-hosted AI Gateway with GitLab-operated Bedrock models | GitLab Self-Managed  with self-hosted AI Gateway and customer-operated Bedrock models |\n| :---- | :---- | :---- | :---- |\n| **Ideal if you:** | Are primarily on GitLab.com and don’t want to self-host AI gateway and Bedrock models  | Need GitLab Self-Managed for compliance and operational reasons but don’t want to manage AI layer | Are AWS-centric with existing Bedrock usage and strict data/control needs  |\n| **Key Benefits** | Fastest, turnkey way to get Duo Agent Platform workflows: GitLab runs GitLab.com, the AI Gateway, integrated with Bedrock AI models. | Keep GitLab deployed in your own environment while consuming Bedrock models via a GitLab-managed AI Gateway, combining deployment control with simplified AI operations. | Run GitLab and AI Gateway in your AWS account, reuse existing IAM/VPC/regions, keep logs and data in your environment, and draw Bedrock usage from your existing AWS spend commitments. |\n\n## How customers use GitLab Duo Agent Platform with Amazon Bedrock\n\nPlatform teams can use GitLab Duo Agent Platform with Amazon Bedrock to standardize which models handle code suggestions, security analysis, and pipeline remediation. This helps enforce guardrails and logging centrally rather than letting individual teams adopt separate tools independently.\n\nSecurity workflows see particular benefit. GitLab Duo Agent Platform agents can propose and validate fixes for security findings within GitLab, helping reduce the manual triage work developers would otherwise handle outside the platform.\n\nFor enterprises already committed to AWS, routing AI workloads through Bedrock from within GitLab enables you to keep developer AI usage aligned with existing cloud agreements rather than generating separate, unplanned spend.\n\n## Closing the loop\n\nThe constraints that slow enterprise AI adoption are often not technical. 
They are organizational: fragmented tooling, ungoverned data flows, and cloud spend that never consolidates. Those are the problems that can stall AI programs even after the pilots succeed.\n\nGitLab Duo Agent Platform and Amazon Bedrock help address each one directly. Platform teams get consistent governance, auditability, and standardized paths for AI usage across the software development lifecycle. Development teams get streamlined, agentic workflows that feel native to GitLab. And AWS-centric organizations get to extend their existing Bedrock investment rather than build parallel AI infrastructure alongside it.\n\nThe result is an AI program that scales without fragmenting. Governance and velocity on the same stack, serving the same teams, under policies the organization already owns.\n\n\n> To explore which deployment pattern is right for your organization and how to align GitLab Duo Agent Platform and Amazon Bedrock with your existing AWS strategy, [contact the GitLab sales team](https://about.gitlab.com/sales/) and we’ll help you design and implement the best architecture for your environment. You can also [visit our AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) to learn more.",[262,1148,704],"AWS","2026-04-21",[1151,1152],"Joe Mann","Mark Kriaf",{"featured":13,"template":833,"slug":1154},"gitlab-amazon-platform-orchestration-on-a-trusted-ai-foundation",{"content":1156,"config":1164},{"title":1157,"description":1158,"authors":1159,"heroImage":1161,"date":921,"body":1162,"category":785,"tags":1163},"GitLab 18.11: Budget guardrails for GitLab Credits","Learn how new spending caps and per-user credit limits give organizations the budget guardrails to scale GitLab Duo Agent Platform.",[1160],"Bryan Rothwell","https://res.cloudinary.com/about-gitlab-com/image/upload/v1776259080/cakqnwo5ecp255lo8lzo.png","Teams using GitLab Duo Agent Platform with on-demand GitLab Credits are shipping faster, catching bugs earlier, and automating tasks that used to take entire sprints. But as adoption grows, so does oversight from finance, procurement, and platform teams to prove that AI spending is bounded, predictable, and controllable.\n\nOne of the greatest barriers to broader AI adoption isn't skepticism about the technology. It's uncertainty about managing spend. Without budget caps, a busy month could produce unexpected expenses. Without per-user limits, a handful of power users could burn through the team's credits before the month is over. And without either, engineering leaders who want to expand their use of agentic AI for software development have to jump through more hoops for budget approval.\n\nSince its [general availability](https://about.gitlab.com/blog/gitlab-duo-agent-platform-is-generally-available/), GitLab Duo Agent Platform has provided usage governance and visibility. 
With GitLab 18.11, we're introducing usage controls for [GitLab Credits](https://about.gitlab.com/blog/introducing-gitlab-credits/): spending caps and budget guardrails that give your organization even more control and transparency over how credits are consumed.\n\n## Managing GitLab Credits\n\nGitLab 18.11 adds three layers of control over GitLab Credits consumption: a subscription-level spending cap, per-user credit limits, and visibility into cap status and enforcement.\n\n### Subscription-level spending cap\n\nBilling account managers can now set a hard monthly ceiling for on-demand GitLab Credits consumption for their entire subscription.\n\nHere's how it works:\n\n* **Set a cap** in the `Customers Portal` under your subscription's GitLab Credits settings.  \n* **Enforce spend limits automatically.**  When on-demand usage reaches the cap, DAP access is paused for all users on that subscription until the next monthly period begins.  \n* **Make adjustments as you go.** Raise or disable the cap mid-month to restore access.\n\nThe cap resets each monthly period and your configured limit carries forward unless you change it. Because usage data is synchronized periodically rather than in real time, a small amount of additional usage may occur after the cap is reached before enforcement takes effect. See the [GitLab Credits documentation](https://docs.gitlab.com/subscriptions/gitlab_credits/) for details.\n\n### User-level spending caps\n\nNot every user consumes credits at the same rate, and that's expected. But when one or two power users account for a disproportionate share of the pool, the rest of the team can lose access before the month is over.\n\nPer-user credit caps prevent any single user from consuming more than their fair share:\n\n* **Flat per-user cap.** Set a uniform credit limit that applies equally to every user on the subscription through the GitLab GraphQL API. Unlike the subscription-level cap, the per-user cap applies to a user's total consumption across all credit sources.  \n* **Custom per-user overrides.** For organizations that need differentiated limits, you can set individual credit caps for specific users through the GraphQL API. For example, you could give your staff engineers a higher allocation while applying a standard limit to the broader team.  \n* **Individual enforcement.** When a user reaches their cap, they retain full access to GitLab. Only their Duo Agent Platform credit usage is paused until the next billing cycle. Everyone else keeps working uninterrupted until they hit their own limit or the subscription-level cap is reached, whichever comes first.\n\n### Visibility and notifications\n\nWhen a subscription-level cap is reached, GitLab sends an email notification to billing account managers so they can take action: raise the cap, wait for the next period, or redistribute credits.\n\nWithin GitLab, group owners (GitLab.com) and instance administrators (Self-Managed) can view which users have been blocked due to reaching their per-user cap and restore access by adjusting the cap through the GraphQL API. \n\n## How budget guardrails help organizations scale AI usage\n\nGuardrails are essential as organizations ramp up their AI adoption. Here's why:\n\n### Predictable AI budgets\n\nUsage controls for GitLab Duo Agent Platform turn AI into a bounded, predictable budget item using on-demand GitLab Credits. 
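For teams that manage these limits as code, the per-user cap is set through the GraphQL API. The sketch below is illustrative only: the mutation and field names are placeholders, so confirm the current schema in the GitLab Credits documentation before using it:

```shell
# Hypothetical mutation and field names -- confirm against the current
# GitLab GraphQL schema before using.
curl --silent --request POST "https://gitlab.com/api/graphql" \
  --header "Authorization: Bearer $GITLAB_API_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"query": "mutation { creditsUserCapSet(input: { namespacePath: \"my-group\", monthlyCreditLimit: 500 }) { errors } }"}'
```

However the cap is set, the ceiling is enforced automatically once it is in place.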
That makes it easier to deploy agents across the software development lifecycle and get sign-off from finance, justify renewals, and plan quarterly spend.\n\n### Governance and chargeback\n\nLarge organizations often need to align AI consumption with internal budgets, cost centers, or departmental policies. Per-user caps give platform teams a straightforward mechanism to allocate credits fairly and track consumption at the individual level. The API import options make it practical to manage caps at enterprise scale. Combined with per-user usage data from the GitLab Credits dashboard, organizations can track consumption patterns to inform their own internal chargeback or budget allocation processes.\n\n### Confidence to scale\n\nMany customers start GitLab Duo Agent Platform with a small pilot group. Usage controls remove risks associated with expanding that pilot across the organization. You can roll out Duo Agent Platform to hundreds or thousands of developers knowing there's a hard ceiling protecting your budget. If usage grows faster than expected, you'll hit the cap, not an unexpected invoice.\n\n## Addressing the seat-based and visibility conundrum\n\nMany AI coding tools take a seat-based approach to cost management. You buy a fixed number of seats at a flat per-user price, and that's your budget. It's simple, but rigid. You pay the same whether a developer uses the tool ten times a day or never touches it. And as vendors introduce premium models and usage-based overages on top of seat pricing, the cost predictability that seat-based licensing promised starts to erode.\n\n\nGitLab takes a different approach. Usage-based pricing with hard caps and a single governance dashboard. You get the flexibility of paying for what your teams actually use, with the budget predictability of enforced spending limits.\n\n## Real-world usage controls\n\n**One example is a mid-size SaaS customer that wants to protect their monthly budget.** A 200-person engineering organization sets a subscription-level cap equal to their expected on-demand usage. Their VP of Engineering can confidently tell finance that GitLab Duo Agent Platform spend will never exceed the approved amount, even as they onboard new teams. If they approach the cap mid-month, the billing account manager gets a notification and can decide whether to raise the limit or wait for the next period.\n\n**At GitLab, we also work with large enterprises that want to keep usage fair across teams.** A global financial services company with 2,000 developers uses per-user caps to ensure equitable access. Staff engineers working on complex refactoring projects get a higher individual allocation via API, while most developers receive a standard flat cap. No single user can exhaust the pool, and the platform team uses the per-user usage data in the GitLab Credits dashboard to track consumption patterns and inform quarterly budget planning.\n\n## Getting started\n\nUsage controls are available for both GitLab.com and Self-Managed customers running GitLab 18.11. Different controls are configured in different places depending on the scope and your role.\n\n**Subscription-level cap**\n\nBilling account managers set the subscription-level on-demand cap in the Customers Portal:\n\n1. Sign in to the `Customers Portal`.  \n2. On your subscription card, navigate to **GitLab Credits** settings.  \n3. 
Enable the monthly on-demand credits cap and enter your desired limit.\n\n**Flat per-user cap**\n\nThe flat per-user cap can be set through the GitLab GraphQL API by namespace owners (GitLab.com) or instance administrators (Self-Managed). Check the [GitLab Credits documentation](https://docs.gitlab.com/subscriptions/gitlab_credits/) for the latest on available configuration surfaces.\n\n**Custom per-user overrides**\n\nFor differentiated limits, namespace owners (GitLab.com) and instance administrators (Self-Managed) can set individual caps programmatically. This is useful for automation and infrastructure-as-code workflows.\n\n**Monitor usage and cap status**\n\n* **Customers Portal:** View detailed usage and cap status.  \n* **GitLab.com:** Group owners can view blocked users under **Settings > GitLab Credits**.  \n* **Self-Managed:** Instance administrators can view cap status and blocked users under **Admin > GitLab Credits**.\n\n## GitLab Duo Agent Platform is ready to scale\n\nUsage controls are available now in GitLab 18.11. If you've been waiting for the right guardrails before expanding GitLab Duo Agent Platform across your organization, this is your moment. Set your caps, roll out Duo Agent Platform to more teams, and start shipping faster!\n\n> [Learn more about GitLab Credits and usage controls](https://docs.gitlab.com/subscriptions/gitlab_credits/).",[785,704,762],{"featured":690,"template":833,"slug":1165},"gitlab-18-11-budget-guardrails-for-gitlab-credits",{"category":104,"slug":796,"posts":1167},[1168,1179,1191],{"content":1169,"config":1177},{"title":1170,"description":1171,"authors":1172,"date":895,"body":1174,"category":796,"tags":1175,"heroImage":1176},"Prepare your pipeline for AI-discovered zero-days","AI is finding vulnerabilities faster than teams can patch. Learn how pipeline enforcement, automated triage, and AI remediation close the gap.",[1173],"Omer Azaria","Anthropic's [Mythos Preview model](https://red.anthropic.com/2026/mythos-preview/) recently identified thousands of zero-day vulnerabilities across every major operating system and web browser, including an OpenBSD bug that went undetected for 27 years. In testing, Mythos autonomously chained four vulnerabilities into a working browser exploit that escaped its sandbox. Anthropic is restricting access to Mythos, but the company’s head of offensive cyber research expects threats to have comparable tooling within six to twelve months.\n\nThe defender side of the equation hasn't kept pace. One third of exploited Common Vulnerabilities and Exposures (CVEs) in the first half of 2025 showed activity on or before disclosure day, before most teams even know there's something to patch. AI is compressing that window further, accelerating attackers and flooding teams with whitehat disclosures faster than they can triage. Defender tooling has improved, but most organizations can't operationalize it fast enough to close the gap between discovery and exploitation.\n\nWhen the window between disclosure and exploitation is measured in hours, the security team can't be the last line of defense. Security has to run where code enters the system: in the pipeline, on every merge request, enforced by policy. The fixes that can be automated should be. The ones that can't need to reach the right human faster than they do today.\n\n## Known vulnerabilities are already outpacing remediation\n\nThe bottleneck isn't detection, it's acting at scale on what teams already know. 
Sixty percent of breaches in the 2025 Verizon DBIR involved exploiting known vulnerabilities where a patch was already available. Teams couldn’t close them in time.\n\nThe backlog was untenable before Mythos. Developers spend [11 hours per month remediating vulnerabilities](https://about.gitlab.com/resources/developer-survey/) post-release instead of shipping new work. Over half of organizations have at least one open internet-facing vulnerability, and the median time to close half of those is 361 days. Exploitation takes hours, while remediation takes months.\n\nAI-assisted development is widening the gap, and stakeholders know it. By June 2025, AI-generated code was adding over 10,000 new security findings per month across Fortune 50 repositories, a 10x jump from six months earlier. Georgia Tech identified 34 [CVEs attributable to AI-generated code](https://research.gatech.edu/bad-vibes-ai-generated-code-vulnerable-researchers-warn) in March 2026, up from 6 in January, and that count reflects only the ones where AI authorship is clear. AI coding assistants hallucinate package names, reach for outdated patterns, and copy insecure examples from training data. More code, more dependencies, and more vulnerabilities per line are generated faster than security teams can review them.\n\nDefenders need to harness frontier AI models, too — not bolted onto the SDLC as external tooling, but running inside the same policies, approvals, and audit trail as the rest of the team. \n\n## Security at the speed of AI coding\n\nWhen a critical CVE drops, how quickly can your team confirm which projects are affected? How many tools does an alert cross before a developer can submit a fix?\n\nThe teams that benefit most from AI already have policies, enforcement, and controls embedded in their development workflows. AI amplifies that foundation. It doesn't replace it.\n\n**Enforcement at the point of change.** As exploitation windows compress, every line of code entering a repository needs to pass through a defined set of controls. Not a separate review, in a different tool, by a different team. Organizations need the ability to enforce security policies across every group and project, with the merge request as the enforcement point. Policies defined once, applied everywhere, with exceptions reviewed, approved, and logged.\n\n**Simple issues caught before the merge request, not during.** Hardcoded secrets, known-vulnerable imports, and deprecated API calls can be flagged in the IDE before a developer pushes a commit. Catching them at authoring time means fewer findings blocking the MR, so review cycles go to the findings that require cross-component context: reachability, exploitability, and architectural risk.\n\n**Triage automated by default, not by exception.** Embedding security into every merge request creates a volume problem. More scans, more findings, more noise reaching developers who aren’t trained to distinguish a reachable critical from a theoretical one. AI must handle false positive detection, reachability, exploitability context, and severity assessment before a developer sees the finding, so the findings they see actually warrant their time.\n\n**Remediation governed like any other change.** AI-based remediation compresses the timeline for closing vulnerabilities, but every generated fix must move through the same governance as a human-authored change: policies enforce scans, the right reviewers approve, and evidence is recorded. 
GitLab’s automated remediation capability proposes each fix in a merge request with a confidence score. The MR records which policy applied, which scans ran, what they found, and who approved. Human code and AI-generated code move through the same process, with the same audit trail.\n\n## What a ready pipeline looks like\n\nHere's how these pieces work together when a high-severity vulnerability is discovered and the clock is running.\n\nA proof-of-concept exploit for a vulnerability in a popular open-source package appears on a security mailing list. There’s no CVE, no National Vulnerability Database (NVD) entry, and no scanner signature yet. The security team finds out the usual way: someone shares it in Slack.\n\nA security engineer asks the security agent if the package is in use, which projects have affected versions, and whether any vulnerable call paths are reachable in production. The agent checks the dependency graph for every project, matches the affected versions and entry points from the disclosure, and returns a ranked list of exposed projects with details about reachability. There’s no need to search through repositories by hand or wait for a scanner update. The question, \"Are we exposed?\" is answered in minutes.\n\nThe engineer starts a remediation campaign for every exposed project. The remediation agent suggests fixes: version updates where a patched release is available, and targeted call-path patches where it is not. Scan execution policies are already in place for projects tagged SOC 2. The engineer hardens the rules to block merges on any merge request that introduces or keeps the affected dependency, and an approval policy now requires security sign-off on every fix. The agent's first proposed patch fails the pipeline when an integration test catches a regression. The agent revises the patch based on the test failure, and the second attempt passes. Developers review the changes, security signs off under the stricter policy, and merges proceed across the campaign.\n\nAt the next audit review, the security team presents a report showing how policies were enforced and risks were reduced during the campaign. It includes scan results, policies applied, approvers, and merge timestamps for every MR in every affected project. The evidence was automatically generated in flight, not assembled after the fact.\n\n## Close the gaps now\n\nMythos exists today, and comparable models will be in attacker hands within a year. Every month between now and then is a chance to strengthen your software supply chain.\n\nAsk these questions about your pipeline:\n\n* How do you enforce that security scans run on every merge request, not just the projects where teams configured them?\n\n* If a compromised package entered your dependency tree today, would your pipeline catch it before build?\n\n* When a scanner flags a critical finding, how many tool boundaries does it cross before a developer starts the fix?\n\n* If an AI agent proposed a code fix for a vulnerability, what process would that fix go through before reaching production, and is that process auditable?\n\n* When auditors ask for evidence that a specific policy was enforced on a specific change, how long does it take to produce?\n\nIf the answers expose gaps, address them now. 
[Talk to a GitLab solutions architect](https://about.gitlab.com/sales/) about the role of security governance in your development lifecycle.",[704,796,516],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1772195014/ooezwusxjl1f7ijfmbvj.png",{"featured":13,"template":833,"slug":1178},"prepare-your-pipeline-for-ai-discovered-zero-days",{"content":1180,"config":1189},{"title":1181,"description":1182,"authors":1183,"heroImage":1185,"date":1186,"category":796,"tags":1187,"body":1188},"Manage vulnerability noise at scale with auto-dismiss policies","Learn how to cut through scanner noise and focus on the vulnerabilities that matter most with GitLab security, including use cases and templates.",[1184],"Grant Hickman","https://res.cloudinary.com/about-gitlab-com/image/upload/v1774375772/kpaaaiqhokevxxeoxvu0.png","2026-03-25",[796,886,548,825,785],"Security scanners are essential, but not every finding requires action. Test code, vendored dependencies, generated files, and known false positives create noise that buries the vulnerabilities that actually matter. Security teams waste hours manually dismissing the same irrelevant findings across projects and pipelines. They experience slower triage, alert fatigue, and developer friction that undermines adoption of security scanning itself.\n\nGitLab's auto-dismiss vulnerability policies let you codify your triage decisions once and apply them automatically on every default-branch pipeline. Define criteria based on file path, directory, or vulnerability identifier (CVE, CWE), choose a dismissal reason, and let GitLab handle the rest.\n\n## Why auto-dismiss?\nAuto-dismiss vulnerability policies enable security teams to:\n- **Eliminate triage noise**: Automatically dismiss findings in test code, vendored dependencies, and generated files.\n- **Enforce decisions at scale**: Apply policies centrally to dismiss known false positives across your entire organization.\n- **Maintain audit transparency**: Every auto-dismissed finding includes a documented reason and links back to the policy that triggered it.\n- **Preserve the record**: Unlike scanner exclusions, dismissed vulnerabilities remain in your report, so you can revisit decisions if conditions change.\n\n## How auto-dismiss policies work\n\n1. **Define your policy** in a vulnerability management policy YAML file. Specify match criteria (file path, directory, or identifier) and a dismissal reason.\n\n2. **Merge and activate.** Create the policy via **Secure > Policies > New  policy > Vulnerability management policy**. Merge the MR to enable it.\n3. **Run your pipeline.** On every default-branch pipeline, matching vulnerabilities are automatically set to \"Dismissed\" with the specified reason. Up to 1,000 vulnerabilities are processed per run.\n4. **Measure the impact.** Filter your vulnerability report by status \"Dismissed\" to see exactly what was cleaned up and validate that the right findings are being handled.\n\n## Use cases with ready-to-use configurations\n\nEach example below includes a policy configuration you can copy, customize, and apply immediately.\n\n### 1. Dismiss test code vulnerabilities\n\nSAST and dependency scanners flag hardcoded credentials, insecure fixtures, and dev-only dependencies in test directories. 
These are not production risks.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Dismiss test code vulnerabilities\"\n    description: \"Auto-dismiss findings in test directories\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: file_path\n            value: \"test/**/*\"\n      - type: detected\n        criteria:\n          - type: file_path\n            value: \"tests/**/*\"\n      - type: detected\n        criteria:\n          - type: file_path\n            value: \"spec/**/*\"\n      - type: detected\n        criteria:\n          - type: directory\n            value: \"__tests__/*\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: used_in_tests\n\n```\n\n### 2. Dismiss vendored and third-party code\n\nVulnerabilities in `vendor/`, `third_party/`, or checked-in `node_modules` are managed upstream and not actionable for your team.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Dismiss vendored dependency findings\"\n    description: \"Findings in vendored code are managed upstream\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: directory\n            value: \"vendor/*\"\n      - type: detected\n        criteria:\n          - type: directory\n            value: \"third_party/*\"\n      - type: detected\n        criteria:\n          - type: directory\n            value: \"vendored/*\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: not_applicable\n\n```\n\n### 3. Dismiss known false positive CVEs\n\nCertain CVEs are repeatedly flagged but don't apply to your usage context. Teams dismiss these manually every time they appear. Replace the example CVEs below with your own.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Dismiss known false positive CVEs\"\n    description: \"CVEs confirmed as false positives for our environment\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CVE-2023-44487\"\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CVE-2024-29041\"\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CVE-2023-26136\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: false_positive\n\n```\n\n### 4. Dismiss generated and auto-created code\n\nProtobuf, gRPC, OpenAPI generators, and ORM scaffolding tools produce files with flagged patterns that cannot be patched by your team.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Dismiss generated code findings\"\n    description: \"Generated files are not authored by us\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: directory\n            value: \"generated/*\"\n      - type: detected\n        criteria:\n          - type: file_path\n            value: \"**/*.pb.go\"\n      - type: detected\n        criteria:\n          - type: file_path\n            value: \"**/*.generated.*\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: not_applicable\n\n```\n\n### 5. Dismiss infrastructure-mitigated vulnerabilities\n\nVulnerability classes like XSS (CWE-79) or SQL injection (CWE-89) that are already addressed by WAF rules or runtime protection. 
Only use this when mitigating controls are verified and consistently enforced.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Dismiss CWEs mitigated by WAF\"\n    description: \"XSS and SQLi mitigated by WAF rules\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CWE-79\"\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CWE-89\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: mitigating_control\n\n```\n\n### 6. Dismiss CVE families across your organization\n\nA wave of related CVEs for a widely-used library your team has assessed? Apply at the group level to dismiss them across dozens of projects. The wildcard pattern (e.g., `CVE-2021-44*`) matches all CVEs with that prefix.\n\n```yaml\nvulnerability_management_policy:\n  - name: \"Accept risk for log4j CVE family\"\n    description: \"Log4j CVEs mitigated by version pinning and WAF\"\n    enabled: true\n    rules:\n      - type: detected\n        criteria:\n          - type: identifier\n            value: \"CVE-2021-44*\"\n    actions:\n      - type: auto_dismiss\n        dismissal_reason: acceptable_risk\n\n```\n\n## Quick reference\n\n| Parameter | Details |\n|-----------|---------|\n| **Criteria types** | `file_path` (glob patterns, e.g., `test/**/*`), `directory` (e.g., `vendor/*`), `identifier` (CVE/CWE with wildcards, e.g., `CVE-2023-*`) |\n| **Dismissal reasons** | `acceptable_risk`, `false_positive`, `mitigating_control`, `used_in_tests`, `not_applicable` |\n| **Criteria logic** | Multiple criteria within a rule = AND (must match all). Multiple rules within a policy = OR (match any). |\n| **Limits** | 3 criteria per rule, 5 rules per policy, 5 policies per security policy project. Vulnerabilty management policy actions process 1000 vulnerabilities per pipeline run in the target project, until all matching vulnerabilities are processed. |\n| **Affected statuses** | Needs triage, Confirmed |\n| **Scope** | Project-level or group-level (group-level applies across all projects) |\n\n## Getting started\nHere's how to get started with auto-dismiss policies:\n\n1. **Identify the noise.** Open your vulnerability report and sort by \"Needs triage.\" Look for patterns: test files, vendored code, the same CVE across projects.\n\n2. **Pick a scenario.** Start with whichever use case above accounts for the most findings.\n\n3. **Record your baseline.** Note the number of \"Needs triage\" vulnerabilities before creating a policy.\n\n4. **Create and enable.** Navigate to **Secure > Policies > New policy > Vulnerability management policy**. Paste the configuration from the use case above, then merge the MR.\n\n5. **Validate results.** After the next default-branch pipeline, filter by status \"Dismissed\" to confirm the right findings were handled.\n\nFor full configuration details, see the [vulnerability management policy documentation](https://docs.gitlab.com/user/application_security/policies/vulnerability_management_policy/#auto-dismiss-policies).\n\n> Ready to take control of vulnerability noise? 
[Start a free GitLab Ultimate trial](https://about.gitlab.com/free-trial/) and configure your first auto-dismiss policy today.\n",{"slug":1190,"featured":13,"template":833},"auto-dismiss-vulnerability-management-policy",{"content":1192,"config":1200},{"title":1193,"description":1194,"authors":1195,"heroImage":831,"date":1197,"body":1198,"category":796,"tags":1199},"GitLab 18.10 brings AI-native triage and remediation ","Learn about GitLab Duo Agent Platform capabilities that cut noise, surface real vulnerabilities, and turn findings into proposed fixes.",[1196],"Alisa Ho","2026-03-19","GitLab 18.10 introduces new AI-powered security capabilities focused on improving the quality and speed of vulnerability management. Together, these features can help reduce the time developers spend investigating false positives and bring automated remediation directly into their workflow, so they can fix vulnerabilities without needing to be security experts.\n\nHere is what’s new:\n\n* [**Static Application Security Testing (SAST) false positive detection**](https://docs.gitlab.com/user/application_security/vulnerabilities/false_positive_detection/) **is now generally available.** This flow uses an LLM for agentic reasoning to determine the likelihood that a vulnerability is a false positive or not, so security and development teams can focus on remediating critical vulnerabilities first.  \n* [**Agentic SAST vulnerability resolution**](https://docs.gitlab.com/user/application_security/vulnerabilities/agentic_vulnerability_resolution/) **is now in beta.** Agentic SAST vulnerability resolution automatically creates a merge request with a proposed fix for verified SAST vulnerabilities, which can shorten time to remediation and reduce the need for deep security expertise.  \n* [**Secret false positive detection**](https://docs.gitlab.com/user/application_security/vulnerabilities/secret_false_positive_detection/) **is now in beta.** This flow brings the same AI-powered noise reduction to secret detection, flagging dummy and test secrets to save review effort.\n\nThese flows are available to GitLab Ultimate customers using GitLab Duo Agent Platform. \n\n## Cut triage time with SAST false positive detection\n\nTraditional SAST scanners flag every suspicious code pattern they find, regardless of whether code paths are reachable or frameworks already handle the risk. Without runtime context, they cannot distinguish a real vulnerability from safe code that just looks dangerous.\n\nThis means developers could spend hours investigating findings that turn out to be false positives. Over time, that can erode confidence in the report and slow down the teams responsible for fixing real risks.\n\nAfter each SAST scan, GitLab Duo Agent Platform automatically analyzes new critical and high severity findings and attaches:\n\n* A confidence score indicating how likely the finding is to be a false positive  \n* An AI-generated explanation describing the reasoning  \n* A visual badge that makes “Likely false positive” versus “Likely real” easy to scan in the UI\n\nThese findings appear in the [Vulnerability Report](https://docs.gitlab.com/user/application_security/vulnerability_report/), as shown below. 
You can filter the report to focus on findings marked as “Not false positive” so teams can spend their time addressing real vulnerabilities instead of sifting through noise.\n\n![Vulnerability report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1773844787/i0eod01p7gawflllkgsr.png)\n\n\nGitLab Duo Agent Platform's assessment is a recommendation. You stay in control of every false positive to determine if it is valid, and you can audit the agent's reasoning at any time to build confidence in the model. \n\n\n## Turn vulnerabilities into automated fixes\n\nKnowing that a vulnerability is real is only half the work.  Remediation still requires understanding the code path, writing a safe patch, and making sure nothing else breaks.\n\nIf the vulnerability is identified as likely not be a false positive by the SAST false positive detection flow, the Agentic SAST vulnerability resolution flow automatically:\n\n1. Reads the vulnerable code and surrounding context from your repository  \n2. Generates high-quality proposed fixes  \n3. Validates fixes through automated testing   \n4. Opens a merge request with a proposed fix that includes:  \n   * Concrete code changes  \n   * A confidence score  \n   * An explanation of what changed and why\n\nIn this demo, you’ll see how GitLab can automatically take a SAST vulnerability all the way from detection to a ready-to-review merge request. Watch how the agent reads the code, generates and validates a fix, and opens an MR with clear, explainable changes so developers can remediate faster without being security experts.\n\n\u003Ciframe src=\"https://player.vimeo.com/video/1174573325?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab 18.10 AI SAST False Positive Auto Remediation\">\u003C/iframe>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\nAs with any AI-generated suggestion, you should review the proposed merge request carefully before merging.\n\n## Surface real secrets\n\nSecret detection is only useful if teams trust the results. When reports are full of test credentials, placeholder values, and example tokens, developers may waste time reviewing noise instead of fixing real exposures. That can slow remediation and decrease confidence in the scan.\n\nSecret false positive detection helps teams focus on the secrets that matter so they can reduce risk faster. When it runs on the default branch, it will automatically:\n\n1. Analyze each finding to spot likely test credentials, example values, and dummy secrets  \n2. Assign a confidence score for whether the finding is a real risk or a likely false positive  \n3. Generate an explanation for why the secret is being treated as real or noise  \n4. 
Add a badge in the Vulnerability Report so developers can see the status at a glance\n\nDevelopers can also trigger this analysis manually from the Vulnerability Report by selecting **“Check for false positive”** on any secret detection finding, helping them clear out findings that do not pose risk and focus on real secrets sooner.\n\n## Try AI-powered security today\n\nGitLab 18.10 introduces capabilities that cover the full vulnerability workflow, from cutting false positive noise in SAST and secret detection to automatically generating merge requests with proposed fixes.\n\nTo see how AI-powered security can help cut review time and turn findings into ready-to-merge fixes, [start a free trial of GitLab Duo Agent Platform today](https://about.gitlab.com/gitlab-duo-agent-platform/?utm_medium=blog&utm_source=blog&utm_campaign=eg_global_x_x_security_en_).",[785,796,825],{"featured":690,"template":833,"slug":1201},"gitlab-18-10-brings-ai-native-triage-and-remediation",{"category":806,"slug":808,"posts":1203},[1204,1215,1228],{"content":1205,"config":1213},{"body":1206,"title":1207,"description":1208,"category":808,"tags":1209,"authors":1210,"heroImage":1211,"date":1212},"\n***Note: The GitLab product did not use any of the compromised package versions mentioned in this post.***\n\nIn the span of 12 days, four separate supply chain attacks revealed that continuous integration and continuous delivery (CI/CD) pipelines have become a high-value target for sophisticated threat actors.\n\nBetween March 19 and March 31, 2026, threat actors compromised:\n\n* an open-source security scanner (Trivy)\n* an infrastructure-as-code (IaC) security scanner (Checkmarx KICS)\n* an AI model gateway (LiteLLM)\n* a JavaScript HTTP client (axios)\n\nEach attack shared the same surface: the build pipeline.\nThis article shows [what happened](#trusted-by-millions-compromised-in-minutes), [why pipelines can be uniquely vulnerable](#the-patterns-behind-these-attacks), and how centralized policy enforcement with GitLab — using policies defined below — can [block, detect, and contain these classes of attack](#how-gitlab-pipeline-execution-policies-address-each-attack-pattern) before they reach production.\n\n\n## Trusted by millions, compromised in minutes\n\nHere is the timeline of the supply chain attacks:\n\n### March 19: Trivy security scanner becomes an attack vector\n\n[Trivy](https://github.com/aquasecurity/trivy) is one of the most widely used open-source vulnerability scanners in the world. It is the tool teams run *inside their pipelines* to find vulnerabilities.\n\nOn March 19, a threat actor group known as [TeamPCP used compromised credentials](https://www.aquasec.com/blog/trivy-supply-chain-attack-what-you-need-to-know/) to force-push malicious code into 76 of 77 version tags of the `aquasecurity/trivy-action` GitHub Action and all 7 tags of `aquasecurity/setup-trivy`. Simultaneously, they published a trojanized Trivy binary (v0.69.4) to official distribution channels. The payload was credential-stealing malware that harvested environment variables, cloud tokens, SSH keys, and CI/CD secrets from every pipeline that ran a Trivy scan.\n\nThe incident was assigned [CVE-2026-33634](https://nvd.nist.gov/vuln/detail/CVE-2026-33634) with a CVSS score of 9.4. 
The Cybersecurity and Infrastructure Security Agency (CISA) added it to the Known Exploited Vulnerabilities catalog within days.\n\n### March 23: Checkmarx KICS falls next\nUsing stolen credentials, TeamPCP pivoted to Checkmarx’s open-source KICS (Keeping Infrastructure as Code Secure) project. They compromised the `ast-github-action` and `kics-github-action` GitHub Actions, [injecting the same credential-stealing malware](https://thehackernews.com/2026/03/teampcp-hacks-checkmarx-github-actions.html). Between 12:58 and 16:50 UTC on March 23, any CI/CD pipeline referencing these actions was silently exfiltrating sensitive data, such as API keys, database passwords, cloud access tokens, SSH keys, and service account credentials.\n\n### March 24: LiteLLM compromised via stolen Trivy credentials\n\nLiteLLM, an LLM API proxy with 95 million monthly downloads, was the next target. TeamPCP [published backdoored versions](https://thehackernews.com/2026/03/teampcp-backdoors-litellm-versions.html) (1.82.7 and 1.82.8) to PyPI using credentials harvested from LiteLLM’s own CI/CD pipeline, which used Trivy for scanning.\n\nThe malware targeting Version 1.82.7 used a base64-encoded payload injected directly into `litellm/proxy/proxy_server.py` that executed at import time. The version targeting 1.82.8 used a `.pth` file, a Python mechanism that executes automatically during interpreter startup. Simply installing LiteLLM was enough to trigger the payload. Attackers encrypted the stolen data (SSH keys, cloud tokens, .env files, cryptocurrency wallets) and exfiltrated it to `models.litellm.cloud`, a lookalike domain.\n\n### March 31: Source code for AI coding assistant leaked via simple packaging mistake\nWhile the TeamPCP campaign was still unfolding, a software company shipped an npm package containing a 59.8 MB source map file — one that referenced its AI coding assistant's complete, unminified TypeScript source code, hosted in the company's own Cloudflare R2 bucket.\n\nThe leak exposed 1,900 TypeScript files, 512,000+ lines of code, 44 hidden feature flags, unreleased model codenames, and the full system prompt for anyone who knew where to look. As engineer [Gabriel Anhaia explained](https://dev.to/gabrielanhaia/claude-codes-entire-source-code-was-just-leaked-via-npm-source-maps-heres-whats-inside-cjo), “A single misconfigured .npmignore or files field in package.json can expose everything.”\n### March 31: axios and another trojan in the supply chain\nThat same day, a sophisticated campaign [targeted the axios npm package](https://thehackernews.com/2026/03/axios-supply-chain-attack-pushes-cross.html), a JavaScript HTTP client with over 100 million weekly downloads.\n\nA compromised maintainer account published backdoored versions (1.14.1 and 0.30.4). It injected a malicious dependency (`plain-crypto-js@4.2.1`) that deployed a Remote Access Trojan capable of running on macOS, Windows, and Linux. Both release branches were hit within 39 minutes, with the malware designed to self-destruct after execution.\n\n## The patterns behind these attacks\n\nAcross these five incidents, three distinct attack patterns emerge, and all of them exploit the implicit trust that CI/CD pipelines place in their inputs.\n\n### Pattern 1: Poisoned tools and actions\n\nThe TeamPCP campaign exploited a fundamental assumption: that the security tools running *inside* your pipeline are themselves trustworthy. 
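To make that trust boundary concrete, compare a typical "install whatever the tag points at" step with one that pins and verifies the tool before running it. This is a hedged sketch: the version number and checksum are placeholders, and the download URL follows Trivy's published release naming:

```shell
# Common pattern: run whatever the upstream installer resolves to today,
# with full access to the job's secrets.
curl -sSfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin

# Pinned alternative: fetch an exact release and refuse to run it unless the
# checksum matches a value recorded out of band.
TRIVY_VERSION=0.69.3              # placeholder version
TRIVY_SHA256=0123456789abcdef     # placeholder checksum
curl -sSfL -o trivy.tar.gz \
  "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz"
echo "${TRIVY_SHA256}  trivy.tar.gz" | sha256sum -c - || exit 1
tar -xzf trivy.tar.gz trivy && ./trivy --version
```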
When a GitHub Action tag or a PyPI package version resolves to malicious code, the pipeline executes it with full access to environment secrets, cloud credentials, and deployment tokens. There is no verification step because the pipeline trusts the tag.\n\n**A recommended pipeline-level control:** Pin tools and actions to immutable references (commit SHAs or image digests) rather than mutable version tags. Where pinning is not practical, verify the integrity of tools and dependencies against known-good checksums or signatures. Block execution if verification fails.\n\n### Pattern 2: Packaging misconfigurations that leak IP\n\nA misconfigured build pipeline shipped debugging artifacts straight into the production package. A misconfigured `.npmignore` or files field in package.json is all it takes. A pre-publish validation step should catch this every time.\n\n**A recommended pipeline-level control:** Before any package is published, run automated checks that validate the package contents against an allowlist, flag unexpected files (source maps, internal configs, .env files), and block the publish step if the checks fail.\n\n### Pattern 3: Vulnerabilities in transitive dependencies\n\nThe axios attack targeted not just direct users of axios, but anyone whose dependency tree resolved to the compromised version. A single poisoned dependency in a lockfile can thus propagate through an entire organization’s build infrastructure.\n\n**A recommended pipeline-level control:** Compare dependency checksums against known-good lockfile state. Detect unexpected new dependencies or version changes. Block builds that introduce unverified packages.\n\n## How GitLab Pipeline Execution Policies address each attack pattern\n\nGitLab Pipeline Execution Policies ([PEPs](https://docs.gitlab.com/user/application_security/policies/pipeline_execution_policies/)) enable security and platform teams to inject mandatory CI/CD jobs into every pipeline across an organization, regardless of what a developer defines in their `.gitlab-ci.yml`. Jobs defined in PEPs cannot be skipped, even with `[skip ci]` or `[no_pipeline]` directives. Jobs can be executed in *reserved* stages (`.pipeline-policy-pre` and `.pipeline-policy-post`) that bookend the developer’s pipeline.\n\nWe have published ready-to-use pipeline execution policies for all three patterns as an open-source project: [Supply Chain Policies](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies). These policies are independently deployable, and each one ships with violation samples that you can use to test them. Here is how each one works.\n\n### Use case 1: Prevent accidental exposure in package publishing\n\n**Problem:** A source map file ended up in the npm package of an AI coding tool after the build pipeline skipped publish-time validation.\n\n**PEP approach:** We built an open-source Pipeline Execution Policy for exactly this class of error: [Artifact Hygiene](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies/-/blob/main/artifact-hygiene.gitlab-ci.yml?ref_type=heads).\n\nThe policy injects `.pipeline-policy-pre` jobs that auto-detect the artifact type (npm package, Docker image, or Helm chart) and inspect the contents before any publish step runs. For npm packages, it performs three checks:\n\n1. **File pattern blocklist.** Scans npm pack output for source maps (.map), test directories, build configs, IDE settings, and src/ directories.\n\n2. 
**Package size gate.** Blocks packages exceeding 50 MB, like the 59.8 MB package that leaked the AI tool.\n\n3. **sourceMappingURL scan.** Detects external URLs (the R2 bucket pattern that exposed a major AI company’s source), inline data: URIs, and local file references embedded in JavaScript bundles.\n\nWhen violations are found, the pipeline fails with a clear report in the failed CI job logs:\n```text\n=============================================\nFAILED: 3 violation(s) found\n=============================================\nBLOCKED: dist/index.js.map (matched: \\.map$)\nBLOCKED: dist/index.js contains external sourceMappingURL\nBLOCKED: dist/utils.js contains inline sourceMappingURL\n\nThis check is enforced by a Pipeline Execution Policy. If this is a false positive, contact the security team to update the policy project or exclude this project.\n```\nThe policy has no user-configurable CI variables. Developers cannot disable or bypass it. Exceptions are managed by the security team at the policy level, ensuring a deliberate process and a clean audit trail.\n\nThe repository includes a test project with intentional violations (examples/leaky-npm-package/) so you can see the policy in action before deploying it to your organization. The [README](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies/-/blob/main/README.md) includes a complete quick-start guide for setup and deployment.\n\n**What this catches:** Any one of these controls would likely have prevented the AI company's source code leak:\n\n* The source map file triggers the file pattern blocklist.\n* Its 59.8 MB size triggers the size gate.\n* The sourceMappingURL pointing to an external R2 bucket triggers the URL scan.\n\n### Use case 2: Detect dependency tampering and lockfile manipulation\n\n**Problem:** The axios attack introduced a malicious transitive dependency (`plain-crypto-js`) that executed a RAT on install. Anyone who ran npm install during the compromise window pulled in the trojan.\n\n**PEP approach:** The [Dependency Integrity policy](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies/-/blob/main/dependency-integrity.gitlab-ci.yml) injects .pipeline-policy-pre jobs that auto-detect the package ecosystem (npm or Python) and perform three checks:\n\n**For npm projects** (triggered by `package-lock.json`, `yarn.lock`, or `pnpm-lock.yaml`):\n\n1. **Lockfile integrity.** Runs `npm ci --ignore-scripts`, which fails if `node_modules` would differ from what the lockfile specifies. This catches cases where package.json was updated but the lockfile was not regenerated, and also verifies SRI integrity hashes.\n2. **Blocked package scan.** Cross-references the lockfile’s full dependency tree against `blocked-packages.yml`, a GitLab-maintained list of known-compromised package versions. The shipped blocklist includes `axios@1.14.1`, `axios@0.30.4`, and `plain-crypto-js@4.2.1`.\n3. **Undeclared dependency detection.** After install, compares the contents of node_modules against the lockfile. Any package present on disk but absent from the lockfile indicates tampering (e.g., a compromised postinstall script that fetches additional packages).\n\n**For Python projects** (triggered by `requirements.txt`, `Pipfile.lock`, `poetry.lock`, or `uv.lock`):\n\n1. **Lockfile integrity.** Installs in an isolated virtual environment and verifies that the install succeeds from the lockfile.\n2. 
**Blocked package scan.** Same blocklist approach. The shipped list includes `litellm==1.82.7` and `litellm==1.82.8`.\n3. **.pth file detection.** Scans site-packages for `.pth` files containing executable code patterns (`import os`, `exec(`, `eval(`, `__import__`, `subprocess`, `socket`). This is the exact mechanism the LiteLLM backdoor used.\n\nWhen a violation is found:\n\n```text\n=============================================\nFAILED: 1 violation(s) found\n=============================================\nBLOCKED: axios@1.14.1 is a known-compromised package\n\nThis check is enforced by a Pipeline Execution Policy.\n```\n\nThe policy runs in *strict mode*: any dependency not present in the committed lockfile blocks the pipeline. If a developer needs to add a dependency, they commit the updated lockfile. The policy verifies that the installed version matches the committed version. If something appears that was not committed (e.g., a transitive dependency injected via a compromised upstream package), the pipeline blocks.\n\n**What this catches:** The introduction of `plain-crypto-js` as a new, previously unseen dependency would be flagged by the undeclared dependency check. The `axios@1.14.1` version would be caught by the blocked package scan. The LiteLLM `.pth` file would be caught by the `.pth` detection check. Each attack has at least one, and often two, independent detection signals.\n\n### Use case 3: Detect and block compromised tools before execution\n\n**Problem:** TeamPCP replaced trusted Trivy and Checkmarx GitHub Action tags with malicious versions. Any pipeline referencing those tags executed credential-stealing malware.\n\n**PEP approach:** The [Tool Integrity policy](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies/-/blob/main/tool-integrity.gitlab-ci.yml) injects a `.pipeline-policy-pre` job that queries the GitLab CI Lint API (or falls back to evaluating the `.gitlab-ci.yml`), extracts the container image references, and compares them against an approved-images allowlist maintained by the security team.\n\nThe allowlist (`approved-images.yml`) supports three controls per image:\n\n**Approved repositories:** Only images from repositories on the list are permitted. An unknown repository blocks the pipeline.\n\n**Allowed tags:** Only specific tags are permitted within an approved repository. This prevents drift to untested versions.\n\n**Blocked tags:** Known-compromised versions can be explicitly blocked even if the repository is approved. The shipped allowlist blocks `aquasec/trivy:0.69.4` through `0.69.6`, the exact versions TeamPCP trojanized.\n\nWhen a violation is found, the pipeline fails before any other job runs:\n\n```text\n=============================================\nFAILED: 1 violation(s) found\n=============================================\nBLOCKED: aquasec/trivy:0.69.4 (job: trivy-scan)\n\n - tag '0.69.4' is known-compromised\n\nThis check is enforced by a Pipeline Execution Policy.\n```\n\nThe allowlist is maintained via MRs against the policy project. To add a new approved image, the security team opens an MR. To respond to a new compromise, they add a blocked tag. No code changes required, just YAML.\n\n**What this catches:** Every container image referenced in the pipeline is checked against the allowlist, whether the repository is unknown, the tag is unapproved, or the tag is explicitly blocked.
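\n\nFor illustration, an allowlist entry expressing those three controls might look like the sketch below. The field names are hypothetical; the authoritative `approved-images.yml` schema ships with the Supply Chain Policies project.\n\n```yaml\n# Hypothetical sketch of an allowlist entry. Field names are illustrative only;\n# see the Supply Chain Policies project for the schema it actually ships.\napproved_images:\n  - repository: aquasec/trivy   # approved repository\n    allowed_tags:               # only these tags may run in pipelines (illustrative values)\n      - 0.68.2\n      - 0.69.3\n    blocked_tags:               # known-compromised tags, blocked even though the repository is approved\n      - 0.69.4\n      - 0.69.5\n      - 0.69.6\n# Images from repositories that do not appear in the allowlist block the pipeline.\n```\n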
A failed match blocks the pipeline before any scanner executes, preventing credential exfiltration.\n\n*Note: By extending the sample above, PEPs can be used to force pinning to digests over tags, which is immune to force pushes. This sample demonstrates a more basic tag-based enforcement pattern.*\n\n## Beyond PEPs: GitLab’s supply chain defenses\n\nPipeline Execution Policies are the enforcement layer, but they work best as part of a broader defense-in-depth strategy. GitLab provides several capabilities that complement PEPs for supply chain protection:\n\n### Secret detection\n\n[GitLab secret detection](https://docs.gitlab.com/user/application_security/secret_detection/) prevents credentials from landing in the repository in the first place, significantly reducing what a compromised pipeline tool can harvest. In the context of the March 2026 attacks:\n\n* Credentials stored in repositories are both easier for attackers to discover and slower to rotate. The Trivy incident showed that even the rotation process can be exploited: Aqua Security's rotation was not atomic, and the attacker captured newly issued tokens before the old ones were fully revoked. GitLab Secret Detection includes automatic revocation for leaked GitLab tokens and a partner API that notifies third-party providers to revoke their credentials, accelerating response when a breach does occur.\n\n* Secret detection combined with proper secret management (short-lived tokens, vault-backed credentials, minimal pipeline secret exposure) limits what an attacker can reach even when a trusted tool turns hostile.\n\n### Dependency scanning via software composition analysis (SCA)\n\nGitLab [dependency scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/) identifies known vulnerabilities in project dependencies by analyzing lockfiles and manifests. In the context of the March 2026 attacks:\n\n* For LiteLLM, the compromised versions (1.82.7, 1.82.8) are tracked in GitLab's advisory database, flagging affected Python projects automatically.\n\n* For axios, dependency scanning identifies the compromised versions (1.14.1, 0.30.4) across every project in the organization, giving security teams a single view for assessing blast radius and prioritizing credential rotation.\n\n* Similarly, all npm packages compromised by TeamPCP's CanisterWorm propagation are also flagged if used.\n\n[GitLab Container Scanning](https://docs.gitlab.com/user/application_security/container_scanning/) detects vulnerable container images used in your deployments. For the Trivy compromise, Container Scanning flags the trojanized Trivy Docker images (0.69.4 through 0.69.6) when they appear in your container registry or deployment manifests.\n\n### Merge request approval policies\n\n[Merge request approval policies](https://docs.gitlab.com/user/application_security/policies/merge_request_approval_policies/) can require security team approval before changes to dependency lockfiles or CI/CD configurations are merged. This ensures a human checkpoint for the types of changes that supply chain attacks typically introduce.\n\n### Coming soon: Dependency Firewall, Artifact Registry, and SLSA Level 3 Attestation & Verification\n\nUpcoming GitLab supply chain security capabilities harden policy enforcement at two critical control points: the registry and the pipeline. 
The Dependency Firewall and Artifact Registry will block non-conforming packages, while SLSA Level 3 attestation will provide cryptographic proof that artifacts were produced by approved pipelines and remain unmodified. Together, they will give security teams verifiable control over what enters and exits the software supply chain.\n\n## What this means for your organization\n\nAmidst rising AI-assisted threats, attacks on CI/CD pipelines are becoming commonplace. The TeamPCP campaign shows how a single compromised credential can cascade across an ecosystem of trusted tools.\n\nIf your organization used any of the affected components, operate with the assumption that all of your pipeline secrets were exposed: rotate them immediately and audit systems for persisted backdoors. Either way, regularly rotating credentials and using short-lived tokens limits the blast radius of any future compromise.\n\nHere is what we recommend:\n\n1. **Pin dependencies to checksums, when possible.** Mutable version tags (like the ones TeamPCP hijacked) are not a security boundary. Use SHA-pinned references for all [CI/CD components](https://docs.gitlab.com/ci/components/#manage-dependencies) or actions and container images.\n\n2. **Run pre-execution integrity checks.** Use Pipeline Execution Policies to verify tool and dependency integrity *before* any pipeline job runs. This is the `.pipeline-policy-pre` stage.\n\n3. **Audit what you publish.** Every package publish step should include automated validation of the artifact contents. Source maps, environment files, and internal configuration should never leave your build environment. The [Supply Chain Policy](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies) project provides a ready-to-deploy starting point for npm, Docker, and Helm artifacts.\n\n4. **Detect dependency drift.** Compare dependency resolutions against committed lockfiles on every pipeline run. Monitor for unexpected new dependencies.\n\n5. **Centralize policy management.** Do not rely on developers remembering to include security checks. Enforce them at the group or instance level through policies that developers cannot remove or skip.\n\n6. **Assume your security tools are targets.** If your vulnerability scanner, static application security testing (SAST) tool, or AI gateway can be compromised, it will be. Limit each tool to its least necessary privileges and verify that it can't reach anything else.\n\n## Protect your pipelines with GitLab\n\nOver two weeks, attackers compromised production pipelines at organizations running some of the most widely adopted tools in the software development ecosystem.\n\nThe lesson is clear: Build pipelines need the same degree of centralized, policy-driven protection that we apply to networks and cloud infrastructure.\n\nGitLab Pipeline Execution Policies provide that enforcement layer. They ensure that security checks run on every pipeline, in every project regardless of individual project configurations. Combined with dependency scanning, secret detection, and merge request approval policies, they can block, detect, and contain the class of attacks we saw in March 2026.\n\nThe [Supply Chain Policies](https://gitlab.com/gitlab-org/security-risk-management/security-policies/projects/supply-chain-policies) project provides a working Pipeline Execution Policy that catches the exact class of error behind the major AI company’s leak, with coverage for npm packages, Docker images, and Helm charts. 
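\n\nFor a rough sense of how such a policy is wired up, a pipeline execution policy in a security policy project points at the CI template it should enforce. The sketch below uses placeholder project and file paths, and the exact fields supported depend on your GitLab version, so check the Pipeline Execution Policies documentation before adopting it.\n\n```yaml\n# Sketch of a pipeline execution policy (policy.yml in a security policy project).\n# The project path and file name are placeholders; adapt them to your environment.\npipeline_execution_policy:\n  - name: Supply chain checks\n    description: Enforce artifact hygiene checks on every pipeline in the group\n    enabled: true\n    pipeline_config_strategy: inject_ci\n    content:\n      include:\n        - project: my-group/supply-chain-policies\n          file: artifact-hygiene.gitlab-ci.yml\n          ref: main\n```\n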
Clone it, deploy it to your group, and ensure that all of your pipelines are ready for the supply chain attacks to come.\n\nTo get started with centralized pipeline policies, sign up for a [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/).\n\n\n*This blog post contains \"forward-looking statements\" within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. Although we believe that the expectations reflected in these statements are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to differ materially. Further information on these risks and other factors is included under the caption \"Risk Factors\" in our filings with the SEC. We do not undertake any obligation to update or revise these statements after the date of this blog post, except as required by law.*","Pipeline security lessons from March supply chain incidents","Learn how centralized pipeline policies can detect and block the patterns behind a series of recent attacks.",[796,785,886,825],[1184],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1772630163/akp8ly2mrsfrhsb0liyb.png","2026-04-07",{"featured":690,"template":833,"slug":1214},"pipeline-security-lessons-from-march-supply-chain-incidents",{"content":1216,"config":1226},{"body":1217,"category":808,"date":1218,"tags":1219,"title":1221,"description":1222,"authors":1223,"heroImage":1225},"After an incident wraps up, every incident response or security operations center faces the same uncomfortable question: What did we miss, and why? Answering that question well takes real work — someone has to read through the incident timeline, map the attacker's actions to detection opportunities, identify the alerts that should have fired but didn't, and translate those findings into concrete improvements. Done manually, it's time-consuming, inconsistent, and easy to deprioritize when the next incident is already knocking.\n\nAt GitLab, our Signals Engineering team is responsible for building and maintaining the detections that protect the platform and the company. We deal with the same detection gap problem that every security team does so we’ve automated detection gap analysis with [GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo-agent-platform/) to improve our assessment of those gaps and how we can close them.\n\nIn this article, you'll learn our strategy, which includes two AI agents you can use in your environment: the built-in Security Analyst Agent and a custom agent we built and named the Detection Engineering Assistant.\n\n\n## The detection gap problem\n\nA detection gap is exactly what it sounds like: an attacker took an action, and your detections didn't catch it. Gap analysis is the process of systematically reviewing security incidents to identify those missed opportunities and determine what new or improved detections would close them.\n\nThe challenge isn't that gap analysis is conceptually hard. It's that it requires careful, methodical reading of incident data and mapping those events to your detection coverage. For a single incident, a skilled analyst can do it well. 
But across a steady stream of incidents, with multiple engineers contributing, it's difficult to maintain consistency and easy to let the review become shallow.\n\nWe wanted a process that was repeatable, thorough, and embedded directly in the workflow where our security incidents already live: GitLab issues.\n\n## What is GitLab Duo Agent Platform?\n\n[GitLab Duo Agent Platform](https://about.gitlab.com/blog/gitlab-duo-agent-platform-is-generally-available/) is GitLab's framework for building and deploying agentic AI agents that can reason, take actions, and integrate natively with GitLab resources like issues, merge requests, and code. Unlike a simple chat interface, agents in Duo Agent Platform can be given specific roles, domain knowledge, and access to tools, making them effective for domain-specific workflows like security operations.\n\nGitLab Duo Agent Platform gives you two practical paths:\n\n1. **Use a pre-built agent** — GitLab ships several out-of-the-box agents, including a Security Analyst Agent designed for security-related tasks.  \n2. **Build your own agent** — You can create a custom agent in just a few minutes by giving it a name, a description, and a system prompt. The system prompt is where the real power lies.\n\nBoth paths are viable for detection gap analysis. Let's look at each.\n\n## 1. Security Analyst Agent\n\nThe easiest way to get started is with [Security Analyst Agent](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/security_analyst_agent/), which comes pre-configured with security domain knowledge and can be invoked directly from a GitLab issue.\n\nTo use the agent for gap analysis, we navigate to a closed incident issue and ask the agent to review the incident description, timeline, tasks, and comments to identify where detections were absent or insufficient. The agent reads the issue content — including comments, linked artifacts, and timeline details — and reasons over it to surface potential gaps. It can identify undetected tactics, techniques, and procedures (TTPs) mapped to MITRE ATT&CK and suggest areas where new detection rules could improve coverage.\n\nThis works well for a quick first pass, especially if your incident issues are well-documented. Security Analyst Agent is knowledgeable about general security concepts, common attacker behaviors, and detection principles. For teams just getting started with AI-assisted operations, it provides immediate value with no configuration required.\n\nThat said, the pre-built agent doesn't know your specific environment, including your SIEM, your log sources, your detection stack, or your team's detection engineering standards. For us, that meant the recommendations, while valid in general, sometimes missed the specific context we needed to translate them into actionable detections. That's what led us to build our own agent.\n\n## 2. Building the Detection Engineering Assistant\n\n[Creating a custom agent in GitLab Duo Agent Platform](https://docs.gitlab.com/user/duo_agent_platform/agents/custom/) is surprisingly straightforward. From the Duo Agent Platform interface, you give the agent a name (we called ours the **Detection Engineering Assistant**), a brief description, and a system prompt. That's it. The agent is ready to use.\n\nThe system prompt is the most important part. It's the agent's knowledge base: everything it knows about your team, your environment, your standards, and how it should reason about its work. A thin, vague system prompt produces thin, vague output. 
A verbose, carefully crafted system prompt produces an agent that behaves like a knowledgeable member of your team.\n\nHere's the approach we took when writing our system prompt for the Detection Engineering Assistant:\n\n### Define the agent's role and scope clearly\n\nWe opened the system prompt by telling the agent exactly what it is and what it's responsible for. Not just \"you are a security analyst.\" We specifically prompted: \"You are a detection engineering assistant for GitLab's Signals Engineering team, responsible for analyzing security incidents and identifying gaps in our detection coverage.\" This framing anchors every response it produces.\n\n### Encode your detection philosophy\n\nWe wrote out what \"a good detection\" means to us: low false positive rates, high signal fidelity, and actionable alerts that provide responders with the context they need. We explained our preference for behavioral detections over IOC-based detections where possible, and described how we think about the tradeoff between coverage breadth and alert fatigue.\n\n### Give it context on your tech stack and log sources\n\nAn agent can only recommend what you can actually build. We told the agent which log sources we ingest, what our SIEM looks like, and what data is and isn't available to us. This means when it recommends a new detection, it does so in terms of what we can actually implement, not hypothetical telemetry we don't have.\n\n### Ground it in MITRE ATT&CK\n\nWe told the agent to organize its gap findings using ATT&CK tactics and techniques. This gives us consistent, structured output that maps directly to how we track coverage internally, and makes it easy to prioritize which gaps to address first.\n\n### Set expectations for output format\n\nWe specified exactly what we want the agent to produce: a structured list of detection gaps, each with the relevant ATT&CK technique, a description of what was missed, the log source or data that could support a detection, and a recommended approach. A consistent output format makes the findings easier to triage and turn into engineering work.\n\n### Example system prompt excerpt\n\n*Note: Our full Detection Engineering Assistant system prompt is 1,870 words and 337 lines. The example below is just a small example of what a full custom system prompt can be.* \n\n\n```text\nYou are the Detection Engineering Assistant for GitLab's Security Operations team. Your role is to analyze closed security incidents and identify gaps in our detection capabilities.\n\nWhen reviewing an incident, you should:\n1. Identify each distinct attacker action or technique described in the incident timeline\n2. For each action, assess whether our existing detections would have caught it\n3. For any action that would not have been detected, document it as a detection gap\n\nFor each gap, provide:\n- MITRE ATT&CK Technique ID and name (e.g., T1078 - Valid Accounts)\n- A plain-language description of what happened and why it wasn't detected\n- The log source or telemetry that could support a detection (e.g., authentication logs, process execution events, network flow data)\n- A recommended detection approach, written in terms our SIEM can implement\n\nOur SIEM ingests [log sources]. Our detection standards prioritize behavioral patterns over static IOCs. Avoid recommending detections that would generate significant false positives without a high-confidence tuning path...\n```\n\nA system prompt this specific produces dramatically more useful output than a generic one. 
The agent stops giving you general security advice and starts giving you detection engineering recommendations.\n\n## Running gap analysis on incidents\n\nWith the Detection Engineering Assistant configured, the workflow is simple. At the close of an incident, we open the incident issue in GitLab and invoke the assistant. It reads the full issue — the incident summary, timeline, investigative notes, and any linked resources — and returns a structured gap analysis.\n\nA typical output looks like this:\n\n**Gap: Lateral movement via valid credentials not detected**\n\n* **ATT&CK:** T1078.004 — Valid Accounts: Cloud Accounts  \n* **What happened:** An attacker used a valid access token to authenticate to an auxiliary GitLab instance. No alert fired because we lacked authentication baseline detections for that instance.  \n* **Log source:** Authentication logs from `example.gitlab.com`  \n* **Recommended approach:** Create a detection that alerts on first-time authentication from a user account to `example.gitlab.com` within a 90-day rolling window, with suppression for accounts with established access patterns.\n\nThis kind of structured output goes directly into our engineering backlog. We treat the agent's analysis as a high-quality first draft. It gets reviewed by a human engineer who validates the findings, checks whether gaps are already covered by detections we haven't documented, and adds context before it becomes an engineering issue. But the hard work of reading the incident and generating the initial findings is automated.\n\n## What we've learned\n\nA few things stand out from building and iterating on this workflow:\n\n**The system prompt is a living document** — Every time the agent produces an output that misses something obvious or gets the framing wrong, we update the prompt. The agent's quality is a direct reflection of how well we've encoded our domain knowledge into it.\n\n**Incident documentation quality matters** — An agent can only reason over what's written down. Incidents with detailed, structured timelines produce much better gap analysis than sparse or informal ones. Building the gap analysis workflow created an unexpected second benefit: it gave us a concrete reason to improve our incident documentation standards.\n\n**This is a force multiplier, not a replacement** — The Detection Engineering Assistant doesn't replace a skilled detection engineer, but it does amplify one. The engineer still reviews the findings, validates the recommendations, and makes the final call on what goes into the backlog. But the time spent on the initial analysis drops significantly, and the consistency across incidents improves.\n\n## Get started\n\nIf you want to build your own detection gap analysis agent, here's where to start:\n\n1. Review your last three to five closed incidents and note what a good gap analysis would have surfaced for each.  \n2. Use those observations to draft a system prompt that encodes your environment, standards, and preferred output format.  \n3. Create a [custom agent](https://docs.gitlab.com/user/duo_agent_platform/agents/custom/) in GitLab Duo Agent Platform with your prompt.  \n4. Run it against one of your incidents and iterate on the prompt based on the output.\n\nThe detection gap problem isn't going away. But with GitLab Duo Agent Platform, you can make the analysis repeatable, consistent, and embedded directly in the place where your security work already happens. 
\n\n> Start [a free trial of GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo-agent-platform/) today!\n","2026-03-10",[796,1220,886,704,825,785,516],"security research","Automating detection gap analysis with GitLab Duo Agent Platform","Learn how GitLab's Signals Engineering team uses our AI platform to automatically surface detection gaps from security incidents — no manual review required.",[1224],"Matt Coons","https://res.cloudinary.com/about-gitlab-com/image/upload/v1773147991/op5xyroonltdwqix0x3u.png",{"featured":13,"template":833,"slug":1227},"automating-detection-gap-analysis-with-gitlab-duo-agent-platform",{"content":1229,"config":1237},{"title":1230,"description":1231,"authors":1232,"heroImage":1211,"date":1234,"body":1235,"category":808,"tags":1236},"How GitLab built a security control framework from scratch","GitLab's Security Compliance team created a custom control framework to scale across multiple certifications and products — here's why and how you can, too.\n",[1233],"Davoud Tu","2026-03-04","GitLab's Security Compliance team discovered that existing security control frameworks lacked the customization to fit the platform's multi-product, cloud-native environment.\n\nSo we built our own.\n\nHere's what we learned and why creating your own custom security control framework might be the right move for your compliance program.\n\n## The journey through frameworks\n\nWhen I joined GitLab's Security Compliance team in November 2022, we were using the [Secure Controls Framework](https://securecontrolsframework.com/) to manage controls across our external certifications and internal compliance needs. But as our requirements grew, we realized we needed something more comprehensive. \n\nWith FedRAMP authorization on our roadmap, we chose to adopt [NIST SP 800-53](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final) next. NIST SP 800-53 includes more than 1,000 controls, but its comprehensiveness isn’t perfectly suited to GitLab’s environment.\n\nWe didn't need to implement every NIST control, only those applicable to our specific requirements. Our focus was on the quality of controls rather than quantity. Implementing unnecessary controls doesn't improve security; in fact, too many can make an environment less secure as individuals find ways to circumvent overly restrictive or irrelevant controls. \n\nSome controls also lacked the necessary granularity for our needs. For example, NIST’s AC-2 “Account Management” control covers account creation and provisioning, account modification and disabling, account removal and termination, shared and group account management, and account monitoring and reviews.\n\nIn practice, these are _at least_ six distinct controls with different owners, testing procedures, and risks. For attestations like SOC 2, each activity is tested as a separate control because they have different evidence requirements and operational contexts. NIST's all-encompassing AC-2 didn't match how we actually operate controls or how auditors actually assess us, and we needed controls granular enough to reflect our operational environment.  \n\nWe found ourselves constantly customizing, adding, and adapting NIST controls to fit our environment. At some point, we realized we weren't really using NIST SP 800-53 anymore, we were building our own framework on top of it. 
We decided a custom control framework, one tailored to GitLab’s environment, would best accommodate our multi-product offering and each product’s unique compliance needs.\n\n## Building the GitLab Control Framework\n\nThrough five methodical steps, we built our own common controls framework: the GitLab Control Framework (GCF).\n\n### 1. Analyze what we need\n\nWe reviewed our existing controls and mapped every requirement from external certifications we already maintained, certifications on our roadmap, and our internal compliance program: \n\n**External certifications:**\n\n* SOC 2 Type II  \n* ISO 27001, ISO 27017, ISO 27018, ISO 42001  \n* PCI DSS  \n* TISAX  \n* Cyber Essentials  \n* FedRAMP\n\n**Internal compliance needs:**\n\n* Controls for mission-critical systems that are not in-scope for external certifications   \n* Controls for systems with access to sensitive data\n\nThis gave us the baseline: what controls must exist to meet our compliance obligations.\n\n### 2. Learn from industry frameworks\n\nNext, we compared our requirements against industry-recognized frameworks:\n\n* NIST SP 800-53  \n* NIST Cybersecurity Framework (CSF)  \n* Secure Controls Framework (SCF)  \n* Adobe and Cisco Common Controls Framework (CCF)\n\nHaving adopted frameworks in the past, we wanted to learn from their structure and ensure we weren't missing critical security domains, controls, or best practices.\n\n### 3. Create custom control domains\n\nThrough this analysis, we created 18 custom control domains tailored to GitLab's environment:\n\n\n| Abbreviation | Domain | Scope of controls |\n| :---- | :---- | :---- |\n| AAM | Audit & Accountability Management | Logging, monitoring, and maintaining audit trails of system activities |\n| AIM | Artificial Intelligence Management | Specific to AI system development, deployment, and governance |\n| ASM | Asset Management | Identifying, tracking, and managing organizational assets |\n| BCA | Backups, Contingency, and Availability Management | Business continuity, disaster recovery, and system availability |\n| CHM | Change Management | Managing changes to systems, applications, and infrastructure |\n| CSR | Customer Security Relationship Management | Customer communication, transparency, and security commitments |\n| DPM | Data Protection Management | Protecting data confidentiality, integrity, and privacy |\n| EPM | Endpoint Management | Securing end-user devices and workstations |\n| GPM | Governance & Program Management | Security governance, policies, and program oversight |\n| IAM | Identity, Authentication, and Access Management | User identity, authentication mechanisms, and access control |\n| INC | Incident Management | Detecting, responding to, and recovering from security incidents |\n| ISM | Infrastructure Security Management | Network, server, and foundational infrastructure security |\n| PAS | Product and Application Security Management | Security capabilities built into the GitLab product that are dogfooded to secure GitLab's own development, such as branch protection & code security scanning |\n| PSM | People Security Management | Personnel security, training, and awareness |\n| SDL | Software Development & Acquisition Life Cycle Management | Secure SDLC practices and third-party software acquisition |\n| SRM | Security Risk Management | Risk assessment, treatment, and management |\n| TPR | Third Party Risk Management | Managing security risks from vendors and suppliers |\n| TVM | Threat & Vulnerability Management | Identifying and 
remediating security vulnerabilities |\n\n\u003Cbr>\u003C/br>\n\n\nEach domain groups related controls into logical families that align with how GitLab's security program is actually organized and operated. This structure provides a methodical approach for adding, updating, or removing controls as our needs evolve.\n\n### 4. Add context and data\n\nWith our domains defined, we needed to address two critical challenges: how to represent controls across multiple products without duplicating the framework, and how to capture meaningful implementation context to actually operate and audit at scale. \n\n#### Scaling across multiple products\n\nGitLab provides multiple product offerings: GitLab.com (multi-tenant SaaS on GCP), GitLab Dedicated (single-tenant SaaS on AWS), and GitLab Dedicated for Government (GitLab’s single-tenant FedRAMP offering on AWS). Each offering has different infrastructure, compliance scopes, and audit requirements. We needed to support product-specific audits without creating entirely separate frameworks.\n\nWe designed a control hierarchy where **Level 1 controls are the framework**, defining what should be implemented at the organizational level. **Level 2 controls are the implementation**, capturing the product-specific details of how each requirement is actually fulfilled.\n\n```mermaid\n%%{init: { \"fontFamily\": \"GitLab Sans\" }}%%\ngraph TD\n    accTitle: Control Hierarchy\n    accDescr: Level 1 requirements cascade to Level 2 implementations.\n    \n    L1[\"Level 1: Framework\u003Cbr/>What must be implemented\"];\n    L2A[\"Level 2: GitLab.com\u003Cbr/>How it's implemented\"];\n    L2B[\"Level 2: Dedicated\u003Cbr/>How it's implemented\"];\n    L2C[\"Level 2: Dedicated for Gov\u003Cbr/>How it's implemented\"];\n    L2D[\"Level 2: Entity\u003Cbr/>(inherited by all)\"];\n    \n    L1-->L2A;\n    L1-->L2B;\n    L1-->L2C;\n    L1-->L2D;\n```\n\n\u003Cbr>\u003C/br>\n\nThis separation allows us to maintain one framework with product-specific implementations, rather than managing duplicate frameworks for each offering. Entity controls apply organization-wide and are inherited by GitLab.com, GitLab Dedicated, and GitLab Dedicated for Government.\n\n#### Adding context to controls\n\nTraditional control frameworks track minimal information: a control ID, description, and owner. The GCF takes a different approach and its superpower is the extensive metadata we track for each control. Beyond just stating the control description or implementation statement, we capture:\n\n* Control owner: Who is accountable for the control and its risk?  \n* Environment: Does this apply organization-wide (Entity, inherited by all product offerings), to GitLab.com, or to Dedicated?  \n* Assets: What specific systems does this control cover?  \n* Frequency: How often is the control performed or tested?  \n* Nature: Is it manual, semi-automated, or fully automated?  \n* Classification: Is this for external certifications or internal risk?  \n* Testing details: How do we assess it? What evidence do we collect?\n\nThis context transforms the GCF from a simple control list into an operationalized control inventory.\n\nWith this structure, we can answer questions like: \n\n* Which controls apply to GitLab.com for our SOC 2 audit vs. GitLab Dedicated? → Filter by environment: GitLab.com  \n* What controls does the Infrastructure team own? → Filter by owner   \n* Which controls can we automate? → Filter by nature: Manual \n\n### 5. 
Iterate, mature, and scale\n\nThe GCF isn't static and was designed to evolve with our business and compliance landscape.\n\n#### Pursuing new certifications\n\nBecause we've operationalized context into the GCF, we can quickly determine the scope and gaps when pursuing new certifications (ISMAP, IRAP, C5, etc.): \n\n1. Determine scope: Which product has the business need (GitLab.com, GitLab Dedicated, or both)?\n2. Map requirements: Do existing controls already cover the new certification requirements?   \n3. Identify gaps: What new controls need to be created?  \n4. Update mappings: Link existing controls to the new certification requirements.\n\n#### Adapting to new regulations\n\nWhen new regulations emerge or existing requirements change: \n\n* Review existing controls: Does an existing control already cover the new requirement?   \n* Update or create: Either update existing control language or create a new control.  \n* Apply the most stringent: When multiple certifications have similar requirements, we implement the most stringent version — secure once, comply with many.\n* Map across certifications: Link the control to all relevant certification requirements.\n\n#### Managing control lifecycle\n\nThe framework adapts to various changes:\n\n* Requirement changes: When certifications update their requirements, we review impacted controls and update descriptions or mappings.\n* Deprecated controls: If a requirement is removed or a control is no longer needed, we mark it as deprecated and remove it from our monitoring schedule.  \n* New risks identified: Risk assessments may identify gaps requiring new internal controls.\n\n## The power of common controls: One control, multiple requirements\n\nSecuring once and complying with many isn't just a principle, it has tangible benefits across how we prepare for audits, support control owners, and pursue new certifications. Here's what that looks like in practice, both qualitatively and in the numbers. \n\n### Qualitative results\n\nSince implementing the GCF, we've seen significant improvements in how we manage compliance: \n\n#### Integrated audit approach\n\nThe GCF enables us to maintain one framework with controls mapped to multiple certification requirements, instead of managing separate control sets for each audit. One control can satisfy SOC 2, ISO 27001, and PCI DSS requirements simultaneously.\n\n#### Faster audit preparation\n\nThrough the GCF, we maintain one consolidated request list instead of separate lists for each audit. Because we've defined controls with specific context, our request lists say \"Okta user list\" instead of generic \"production user list,\" eliminating ambiguity and interpretation. We're not collecting “N/A” evidence or leaving it up to auditors to interpret what \"production\" means in our environment. Everything is already scoped to our actual systems.\n\n#### Reduced stakeholder burden\n\nThis integration directly reduces burden on our stakeholders. Control owners provide evidence once instead of responding to separate requests from SOC 2, ISO, and PCI auditors. When we collect evidence for access controls, it satisfies SOC 2, ISO 27001, and PCI DSS requirements simultaneously. One control, one test, one piece of evidence with multiple certifications and requirements satisfied.\n\n#### Efficient gap assessments\n\nWhen pursuing new certifications or launching new features, the operationalized context enables more efficient gap analysis. 
We can determine which controls already exist, what's missing, and what implementation is required. \n\n### Quantifiable results\n\n**Control efficiency:**\n\n* Reduced SOC controls by 58% (200 controls → 84\\) for GitLab.com and 55% (181 → 82) for GitLab Dedicated  \n* One framework now supports 8+ certifications \n\n**Audit efficiency:**\n\n* Consolidated 4 audit request lists into 1, reducing requests by 44% (415 → 231)  \n* 95% evidence acceptance rate before fieldwork for recent PCI audits\n\n**Framework scale:**\n\n* 220+ active controls across 18 custom domains  \n* Mapped to 1,300+ certification requirements  \n* Supports multiple product offerings\n\n## The path forward\n\nThe GCF continues to evolve as we add security and AI controls, pursue new certifications, and refine our approach. \n\n**For security compliance practitioners:** Don't be afraid to build your own framework if industry standards don't fit. The upfront investment pays dividends in scalability, efficiency, and controls that actually make sense for your environment. Sometimes the best framework is the one you design yourself.\n\n> If you found this helpful, check out our complete [GitLab Control Framework documentation](https://handbook.gitlab.com/handbook/security/security-assurance/security-compliance/sec-controls/), where we detail our framework methodology, control domains, and field structures.",[796,886],{"featured":13,"template":833,"slug":1238},"how-gitlab-built-a-security-control-framework-from-scratch",{"content":1240,"config":1243},{"title":866,"description":867,"authors":1241,"heroImage":870,"date":871,"body":872,"category":702,"tags":1242},[869],[704,785,262],{"featured":13,"template":833,"slug":875},[1245,1250,1257],{"content":1246,"config":1249},{"title":983,"description":984,"authors":1247,"heroImage":987,"date":988,"body":989,"category":551,"tags":1248},[986],[605,951],{"featured":690,"template":833,"slug":992},{"content":1251,"config":1255},{"title":1252,"description":1253,"heroImage":1136,"date":988,"tags":1254},"GitLab Patch Release: 18.11.2, 18.10.5","Learn about this release for GitLab Community Edition and Enterprise Edition.",[1138],{"featured":690,"template":833,"externalUrl":1256},"https://docs.gitlab.com/releases/patches/patch-release-gitlab-18-11-2-released/",{"content":1258,"config":1261},{"title":1021,"description":1022,"authors":1259,"heroImage":1025,"date":871,"body":1026,"category":750,"tags":1260},[1024],[89,785,886],{"featured":690,"template":833,"slug":1029},[1263,1268,1273],{"content":1264,"config":1267},{"title":904,"description":905,"heroImage":906,"authors":1265,"date":909,"body":910,"category":715,"tags":1266},[908],[785],{"featured":690,"template":833,"slug":913},{"content":1269,"config":1272},{"title":1097,"description":1098,"authors":1270,"heroImage":870,"date":1101,"body":1102,"category":774,"tags":1271},[1100],[704,247],{"featured":690,"template":833,"slug":1105},{"content":1274,"config":1276},{"title":1134,"description":1135,"heroImage":1136,"date":1101,"category":785,"tags":1275},[1138,1139],{"featured":690,"template":833,"externalUrl":1141},1777493629945]