[{"data":1,"prerenderedAt":1245},["ShallowReactive",2],{"/en-us/blog":3,"navigation-en-us":21,"banner-en-us":449,"footer-en-us":466,"footer-source-/en-us/blog/":711,"blogCategories-en-us":715,"relatedBlogPosts-en-us":826,"maineFeaturedPost-en-us":1210,"recentFeaturedPosts-en-us":1215,"recentPosts-en-us":1230},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"seo":8,"content":11,"config":13,"_id":15,"_type":16,"title":7,"_source":17,"_file":18,"_stem":19,"_extension":20},"/en-us/blog","en-us",false,"",{"title":9,"description":10},"Blog | GitLab","Tutorials, product information, expert insights, and more from GitLab to help DevSecOps teams build, test, and deploy secure software faster.",{"title":12},"GitLab Blog",{"template":14},"BlogHome","content:en-us:blog:index.yml","yaml","content","en-us/blog/index.yml","en-us/blog/index","yml",{"_path":22,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":445,"_type":16,"title":446,"_source":17,"_file":447,"_stem":448,"_extension":20},"/shared/en-us/main-navigation",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,186,191,297,357],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":168},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,147],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software 
delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/application-security-testing/","security and compliance","ShieldCheckLight",[133,137,142],{"text":134,"config":135},"Application Security Testing",{"href":129,"dataGaName":136,"dataGaLocation":28},"Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":28,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":28,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":28,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":28,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":28,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":28,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":28,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":28,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all resources",{"href":198,"dataGaName":194,"dataGaLocation":28},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":28},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":28,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":28},"/getting-started-videos/","best practice 
videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":28},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":28},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":28},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":28},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab Services",{"href":262,"dataGaName":263,"dataGaLocation":28},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":28},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":28},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":28},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":28},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":28},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":28},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust Center",{"href":340,"dataGaName":341,"dataGaLocation":28},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":28},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":28},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":35,"config":364},{"href":37,"dataGaName":365,"dataGaLocation":28},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":28},"/support/","get 
help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":42,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":57,"config":389},{"href":62,"dataGaName":57,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":109,"config":395},{"href":111,"dataGaName":109,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why GitLab?",{"href":70,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":33,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":62,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":16,"_source":17,"_file":464,"_stem":465,"_extension":20},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the Beta",{"href":455,"dataGaName":456,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent 
Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":707,"_type":16,"title":708,"_source":17,"_file":709,"_stem":710,"_extension":20},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":699},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,539,592,636,665],{"title":187,"links":493,"subMenu":508},[494,498,503],{"text":495,"config":496},"View plans",{"href":189,"dataGaName":497,"dataGaLocation":475},"view plans",{"text":499,"config":500},"Why Premium?",{"href":501,"dataGaName":502,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":504,"config":505},"Why Ultimate?",{"href":506,"dataGaName":507,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",[509],{"title":510,"links":511},"Contact Us",[512,515,517,519,524,529,534],{"text":513,"config":514},"Contact sales",{"href":37,"dataGaName":38,"dataGaLocation":475},{"text":367,"config":516},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":518},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":520,"config":521},"Status",{"href":522,"dataGaName":523,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":525,"config":526},"Terms of use",{"href":527,"dataGaName":528,"dataGaLocation":475},"/terms/","terms of use",{"text":530,"config":531},"Privacy statement",{"href":532,"dataGaName":533,"dataGaLocation":475},"/privacy/","privacy statement",{"text":535,"config":536},"Cookie preferences",{"dataGaName":537,"dataGaLocation":475,"id":538,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"title":90,"links":540,"subMenu":548},[541,545],{"text":542,"config":543},"DevSecOps platform",{"href":55,"dataGaName":544,"dataGaLocation":475},"devsecops platform",{"text":113,"config":546},{"href":62,"dataGaName":547,"dataGaLocation":475},"ai-assisted development",[549],{"title":550,"links":551},"Topics",[552,557,562,567,572,577,582,587],{"text":553,"config":554},"CICD",{"href":555,"dataGaName":556,"dataGaLocation":475},"/topics/ci-cd/","cicd",{"text":558,"config":559},"GitOps",{"href":560,"dataGaName":561,"dataGaLocation":475},"/topics/gitops/","gitops",{"text":563,"config":564},"DevOps",{"href":565,"dataGaName":566,"dataGaLocation":475},"/topics/devops/","devops",{"text":568,"config":569},"Version Control",{"href":570,"dataGaName":571,"dataGaLocation":475},"/topics/version-control/","version 
control",{"text":573,"config":574},"DevSecOps",{"href":575,"dataGaName":576,"dataGaLocation":475},"/topics/devsecops/","devsecops",{"text":578,"config":579},"Cloud Native",{"href":580,"dataGaName":581,"dataGaLocation":475},"/topics/cloud-native/","cloud native",{"text":583,"config":584},"AI for Coding",{"href":585,"dataGaName":586,"dataGaLocation":475},"/topics/devops/ai-for-coding/","ai for coding",{"text":588,"config":589},"Agentic AI",{"href":590,"dataGaName":591,"dataGaLocation":475},"/topics/agentic-ai/","agentic ai",{"title":593,"links":594},"Solutions",[595,597,599,604,608,611,615,618,620,623,626,631],{"text":134,"config":596},{"href":129,"dataGaName":134,"dataGaLocation":475},{"text":123,"config":598},{"href":105,"dataGaName":106,"dataGaLocation":475},{"text":600,"config":601},"Agile development",{"href":602,"dataGaName":603,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":605,"config":606},"SCM",{"href":119,"dataGaName":607,"dataGaLocation":475},"source code management",{"text":553,"config":609},{"href":111,"dataGaName":610,"dataGaLocation":475},"continuous integration & delivery",{"text":612,"config":613},"Value stream management",{"href":162,"dataGaName":614,"dataGaLocation":475},"value stream management",{"text":558,"config":616},{"href":617,"dataGaName":561,"dataGaLocation":475},"/solutions/gitops/",{"text":172,"config":619},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":621,"config":622},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":624,"config":625},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":627,"config":628},"Education",{"href":629,"dataGaName":630,"dataGaLocation":475},"/solutions/education/","education",{"text":632,"config":633},"Financial services",{"href":634,"dataGaName":635,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":637},[638,640,642,644,647,649,651,653,655,657,659,661,663],{"text":204,"config":639},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":641},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":643},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":645},{"href":221,"dataGaName":646,"dataGaLocation":475},"docs",{"text":242,"config":648},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":650},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":652},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":654},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":656},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":658},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":660},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":662},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":664},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":666},[667,669,671,673,675,677,679,683,688,690,692,694],{"text":305,"config":668},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":670},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":672},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":674},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":676},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":678},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":680,"config":681},"Sustainability",{"href":682,"dataGaName":680,"dataGaLocation":475},"/sustainability/",{"text":684,"config":685},"Diversity, inclusion and belonging (DIB)",{"href":686,"dataGaName":687,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":338,"config":689},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":691},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":693},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":695,"config":696},"Modern Slavery Transparency Statement",{"href":697,"dataGaName":698,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"items":700},[701,703,705],{"text":525,"config":702},{"href":527,"dataGaName":528,"dataGaLocation":475},{"text":530,"config":704},{"href":532,"dataGaName":533,"dataGaLocation":475},{"text":535,"config":706},{"dataGaName":537,"dataGaLocation":475,"id":538,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"seo":712,"content":713,"config":714,"_id":15,"_type":16,"title":7,"_source":17,"_file":18,"_stem":19,"_extension":20},{"title":9,"description":10},{"title":12},{"template":14},[716,729,741,752,763,773,784,795,806,816],{"_path":717,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":719,"content":722,"config":723,"_id":726,"_type":16,"title":720,"_source":17,"_file":727,"_stem":728,"_extension":20},"/en-us/blog/categories/agile-planning","categories",{"title":720,"description":721},"Agile Planning","Browse articles related to Agile Planning on the GitLab 
Blog",{"name":720},{"template":724,"slug":725,"hide":6},"BlogCategory","agile-planning","content:en-us:blog:categories:agile-planning.yml","en-us/blog/categories/agile-planning.yml","en-us/blog/categories/agile-planning",{"_path":730,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":731,"content":734,"config":735,"_id":737,"_type":16,"title":738,"_source":17,"_file":739,"_stem":740,"_extension":20},"/en-us/blog/categories/ai-ml",{"title":732,"description":733},"AI/ML","Browse articles related to AI/ML on the GitLab Blog",{"name":732},{"template":724,"slug":736,"hide":6},"ai-ml","content:en-us:blog:categories:ai-ml.yml","Ai Ml","en-us/blog/categories/ai-ml.yml","en-us/blog/categories/ai-ml",{"_path":742,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":743,"content":746,"config":747,"_id":749,"_type":16,"title":744,"_source":17,"_file":750,"_stem":751,"_extension":20},"/en-us/blog/categories/bulletin-board",{"title":744,"description":745},"Bulletin Board","Browse articles related to Bulletin Board on the GitLab Blog",{"name":744},{"template":724,"slug":748,"hide":6},"bulletin-board","content:en-us:blog:categories:bulletin-board.yml","en-us/blog/categories/bulletin-board.yml","en-us/blog/categories/bulletin-board",{"_path":753,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":754,"content":757,"config":758,"_id":760,"_type":16,"title":755,"_source":17,"_file":761,"_stem":762,"_extension":20},"/en-us/blog/categories/customer-stories",{"title":755,"description":756},"Customer Stories","Browse articles related to Customer Stories on the GitLab Blog",{"name":755},{"template":724,"slug":759,"hide":6},"customer-stories","content:en-us:blog:categories:customer-stories.yml","en-us/blog/categories/customer-stories.yml","en-us/blog/categories/customer-stories",{"_path":764,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":765,"content":767,"config":768,"_id":769,"_type":16,"title":770,"_source":17,"_file":771,"_stem":772,"_extension":20},"/en-us/blog/categories/devsecops",{"title":573,"description":766},"Browse articles related to DevSecOps on the GitLab Blog",{"name":573},{"template":724,"slug":576,"hide":6},"content:en-us:blog:categories:devsecops.yml","Devsecops","en-us/blog/categories/devsecops.yml","en-us/blog/categories/devsecops",{"_path":774,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":775,"content":778,"config":779,"_id":781,"_type":16,"title":776,"_source":17,"_file":782,"_stem":783,"_extension":20},"/en-us/blog/categories/engineering",{"title":776,"description":777},"Engineering","Browse articles related to Engineering on the GitLab Blog",{"name":776},{"template":724,"slug":780,"hide":6},"engineering","content:en-us:blog:categories:engineering.yml","en-us/blog/categories/engineering.yml","en-us/blog/categories/engineering",{"_path":785,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":786,"content":789,"config":790,"_id":792,"_type":16,"title":787,"_source":17,"_file":793,"_stem":794,"_extension":20},"/en-us/blog/categories/news",{"title":787,"description":788},"News","Browse articles related to News on the GitLab Blog",{"name":787},{"template":724,"slug":791,"hide":6},"news","content:en-us:blog:categories:news.yml","en-us/blog/categories/news.yml","en-us/blog/categories/news",{"_path":796,"_dir":718,"_draft":6,"_partial":6,"_locale":7,"seo":797,"content":800,"config":801,"_id":803,"_type":16,"title":798,"_source":17,"_file":804,"_stem":805,"_extension":20},"/en-us/blog/categories/open-source",{"title":798,"description":799},"Open Source","Browse articles related to 
---

# Embedded views: The future of work tracking in GitLab

**Matthew Macfarlane, Himanshu Kapoor, Alex Fracazo** · August 21, 2025 · Agile Planning

Learn how embedded views, powered by GitLab Query Language, help GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows.

Ever find yourself switching between tabs in GitLab just to keep track of what’s happening in your project? Maybe you’re checking on an issue, then jumping to a merge request, then over to an epic to see how everything connects. Before you know it, you’ve got a browser full of tabs and you’ve lost your train of thought.

If that sounds familiar, you’re definitely not alone. So many teams waste time and energy flipping through various items in their project management software, just trying to get a handle on their work.

That's why we created [embedded views](https://docs.gitlab.com/user/glql/#embedded-views), powered by [GitLab Query Language (GLQL)](https://docs.gitlab.com/user/glql/). With embedded views, [available in 18.3](https://about.gitlab.com/releases/2025/08/21/gitlab-18-3-released/), you get live, relevant information right where you’re already working in GitLab. No more endless context switching. No more outdated reports. Just the info you need, right when you need it.

## Why embedded views matter

Embedded views are more than just a new feature: they're a fundamental shift in how teams understand and track their work within GitLab. With embedded views, teams can maintain context while accessing real-time information, creating shared understanding, and improving collaboration without ever leaving their current workflow. It’s about making work tracking feel natural and effortless, so you can focus on what matters.

## How it works: Real-time data right where you need it the most

Embedded views let you insert live GLQL queries in Markdown code blocks throughout wiki pages, epics, issues, and merge requests. Here's what makes them so useful:

### Always up to date

GLQL queries are dynamic, pulling fresh data each time the page loads, so your embedded views always reflect the current state of your work, not the state when you embedded the view. When changes happen to issues, merge requests, or milestones, a page refresh will show those updates in your embedded view.

### Contextual awareness

Use functions like `currentUser()` and `today()` to make queries context-specific. Your embedded views automatically adapt to show relevant information for whoever is viewing them, creating personalized experiences without manual configuration.

### Powerful filtering

Filter by fields like assignee, author, label, milestone, health status, creation date, and more. Use logical expressions to get exactly the data you want. We support more than 30 fields as of 18.3.

### Customizable display

You can display your data as a table, a list, or a numbered list. Choose which fields to show, set a limit on the number of items, and specify the sort order to keep your view focused and actionable.

### Availability

You can use embedded views in group and project wikis, epic and issue descriptions, merge requests, and comments. GLQL is available across all GitLab tiers: Free, Premium, and Ultimate, on GitLab.com, GitLab Self-Managed, and GitLab Dedicated. Certain functionality, such as displaying epics, status, custom fields, iterations, and weights, is available in the Premium and Ultimate tiers. Displaying health status is available only in Ultimate.

## See embedded views in action

The syntax of an embedded view's source is a superset of YAML that consists of:

* The `query` parameter: expressions joined together with a logical operator, such as `and`.
* Parameters related to the presentation layer, like `display`, `limit`, `fields`, `title`, and `description`, represented as YAML.

A view is defined in Markdown as a code block, similar to other code blocks like Mermaid.

For example:

> Display a table of the first 5 open issues assigned to the authenticated user in `gitlab-org/gitlab`.
> Display columns `title`, `state`, `health`, `description`, `epic`, `milestone`, `weight`, and `updated`.

````yaml
```glql
display: table
title: GLQL table 🎉
description: This view lists my open issues
fields: title, state, health, epic, milestone, weight, updated
limit: 5
query: project = "gitlab-org/gitlab" AND assignee = currentUser() AND state = opened
```
````

This source should render a table like the one below:

![Rendered GLQL table](https://res.cloudinary.com/about-gitlab-com/image/upload/v1755193172/ibzfopvpztpglnccwrjj.png)

An easy way to create your first embedded view is to navigate to the **More options** dropdown in the rich text editor toolbar. From this dropdown, select **Embedded view**, which populates the following query in a Markdown code block:

````yaml
```glql
query: assignee = currentUser()
fields: title, createdAt, milestone, assignee
title: Issues assigned to current user
```
````

Save your changes to the comment or description where the code block appears, and you're done! You've successfully created your first embedded view.

## How GitLab uses embedded views

Whether tracking merge requests targeting security releases, triaging bugs to improve backlog hygiene, or managing team onboarding and milestone planning, we rely on embedded views for mission-critical processes every day. This isn't just a feature we built; it's a tool we depend on to run our business effectively. When you adopt embedded views, you're getting a tested solution that's already helping GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows. Simply stated, embedded views can transform how your team accesses and analyzes the work that matters most to your success.

To learn more about how GitLab uses embedded views internally, check out [How GitLab measures Red Team impact: The adoption rate metric](https://about.gitlab.com/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric/), and the Global Search Release Planning issues for the [18.1](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/239), [18.2](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/241), and [18.3](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/245) milestones.

## What's next

Embedded views are just the start of the [Knowledge Group's](https://about.gitlab.com/direction/plan/knowledge/) vision for work tracking. Learn more about what we're focusing on next in the [embedded views post-GA epic](https://gitlab.com/groups/gitlab-org/-/epics/15249). As embedded views evolve, we're committed to making them even more powerful and [accessible](https://gitlab.com/gitlab-org/gitlab/-/issues/548722).

## Share your experience

Share your feedback in the [embedded views GA feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/509792) or via the [embedded views GA survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_6PFhgZMBA06kr7E). Whether you've discovered innovative use cases, encountered challenges, or have ideas for improvements, we want to hear from you.

---

# SAFe without silos in GitLab

**Amanda Rueda** · April 8, 2025 · Agile Planning

Learn how to map the Scaled Agile Framework to the native capabilities of the DevSecOps platform and the advantages that come from doing so.

Let's talk about what happens when your organization adopts the Scaled Agile Framework (SAFe) to scale to enterprise levels. You've got multiple teams working on complex products, and you need a way to coordinate all that work. But here's a common headache: Your planning happens in one tool, while your actual development work lives somewhere else entirely.

This divide creates real problems day-to-day. Developers jump between systems constantly. Product managers struggle to get an accurate picture of progress. And everyone wastes time manually copying information from one place to another. It's precisely the kind of disjointed experience that SAFe was designed to eliminate.

While your development teams might already be using GitLab for source code management, CI/CD, and security, you may wonder whether GitLab can also support your planning needs within the SAFe framework. The good news is that GitLab's Agile project management capabilities offer strong support for SAFe. In this article, you'll learn how GitLab maps to SAFe concepts and ceremonies, all within the same DevSecOps platform your software developers already know and love.

## What is SAFe?

SAFe, or the Scaled Agile Framework, is a way to bring Agile principles to large organizations without losing speed, alignment, or customer focus. It takes the iterative and flexible teamwork model of small teams and applies its principles across big organizations that have multiple teams, roadmaps, and stakeholders. This brings the organization into alignment, with everyone planning and executing in the same direction. For product managers, SAFe helps connect strategy to execution so you’re not just shipping fast, you’re shipping the right things, backed by clear priorities and cross-team alignment.

SAFe reduces silos, encourages collaboration, and helps teams rally around customer outcomes, not just tasks. When SAFe is implemented in GitLab, the magic really happens: visibility, traceability, and delivery all live in one place.

## SAFe terminology in GitLab

First, let's establish how SAFe concepts map to GitLab:

| SAFe | GitLab |
| :---- | :---- |
| Epic | Top-level Epic |
| Capability | Sub-epic (Level 1) |
| Feature | Sub-epic (Level 2) |
| User Story | Issue |
| Task | Task |
| Team | Custom Field / Scoped Label |
| Sprint | Iteration |
| Program Increment (PI) | Milestone |
| Value Stream | Top-level Group |
| Agile Release Train (ART) | Top-level Group |

With this mapping as your guide, you can set up GitLab to mirror your SAFe implementation. The group structure lets you organize around your value streams and ARTs, while the work item hierarchy (with up to seven levels of nested epics!) gives you all the depth you need for complex product portfolios. Whether you're working at the portfolio level (with top-level groups), program level (with subgroups), or team level (with projects), GitLab's organizational structure aligns perfectly with SAFe's hierarchy.
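To see how the mapping translates into concrete objects, here is a minimal sketch using the python-gitlab client, assuming an existing top-level group `acme-art` for the ART and a token with API scope; every name, date, and label below is illustrative.

```python
# Sketch: scaffold a SAFe structure in GitLab via the API (python-gitlab).
# Assumes an existing top-level group "acme-art" (the ART / value stream)
# and a personal access token with API scope. All names are examples.
import gitlab

gl = gitlab.Gitlab("https://gitlab.com", private_token="glpat-...")

art = gl.groups.get("acme-art")  # Agile Release Train = top-level group

# Program level: a subgroup under the ART
program = gl.groups.create({
    "name": "Payments Program",
    "path": "payments-program",
    "parent_id": art.id,
})

# Team level: a project inside the program subgroup
team_project = gl.projects.create({
    "name": "checkout-team",
    "namespace_id": program.id,
})

# Program Increment = group milestone spanning several iterations
pi = art.milestones.create({
    "title": "PI 2025.1",
    "start_date": "2025-01-06",
    "due_date": "2025-03-28",
})

# Feature = epic (Premium/Ultimate); User Story = issue assigned to the PI
feature = art.epics.create({"title": "Self-serve refunds"})
story = team_project.issues.create({
    "title": "Refund API endpoint",
    "milestone_id": pi.id,
    "labels": ["team::checkout"],
})
```

Run once per program, a script like this gives every team the same skeleton to hang the ceremonies below on.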
## Supporting SAFe ceremonies in GitLab

Now for the fun part: How do you actually run your SAFe ceremonies in GitLab? Let's walk through each one.

### PI planning

To facilitate the cross-team alignment and dependency management that makes PI planning successful, GitLab offers several capabilities:

* Use the [Roadmap](https://docs.gitlab.com/user/group/roadmap/) view to visualize features across teams and time periods
* Assign features to the PI [milestone](https://docs.gitlab.com/user/project/milestones/)
* Document and visualize cross-team [dependencies](https://docs.gitlab.com/user/project/issues/related_issues/#blocking-issues) as they're identified (a scripted example follows this list)
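Dependencies surfaced during PI planning can be captured as blocking issue links. A hypothetical sketch, again with python-gitlab; the project paths and issue IIDs are placeholders:

```python
# Sketch: record a cross-team dependency found during PI planning
# as a "blocks" issue link (blocking links require Premium).
# Paths and IIDs are placeholders.
import gitlab

gl = gitlab.Gitlab("https://gitlab.com", private_token="glpat-...")

platform = gl.projects.get("acme-art/platform-team")
checkout = gl.projects.get("acme-art/checkout-team")

blocker = platform.issues.get(42)   # "Provision refunds queue"
blocked = checkout.issues.get(17)   # "Refund API endpoint"

# The platform issue blocks the checkout story
blocker.links.create({
    "target_project_id": checkout.id,
    "target_issue_iid": blocked.iid,
    "link_type": "blocks",
})
```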
GitLab gives you flexibility for PI planning through both the Epic boards (which can be configured to show team assignments) and the Roadmap view (which shows features over time like a Gantt chart). You can switch between these views during your planning session depending on whether you're focusing on the timeline or team organization.

![Roadmap view and epic board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097576746.gif)

![Roadmap view with Gantt chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097576747.png)

### Refinement

As a product manager, running effective refinement sessions means having clear visibility into your feature backlog. You can run your refinement session right inside GitLab. No more updating one tool during the meeting and then having to update another tool afterward.

GitLab powers refinement sessions with:

* [Epic boards](https://docs.gitlab.com/user/group/epics/epic_boards/) that group features based on status
* The ability to view story points directly in the [overview](https://docs.gitlab.com/user/group/epics/epic_boards/#view-count-of-issues-weight-and-progress-of-an-epic)
* Comprehensive [drawer views](https://docs.gitlab.com/user/group/epics/manage_epics/#open-epics-in-a-drawer) that let you interact with work items without losing context
* The ability to create and link [child issues](https://docs.gitlab.com/user/group/epics/manage_epics/#add-an-issue-to-an-epic) directly from epics

![Epic board used for refinement](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097576749.gif)

### Sprint planning

When it's time to figure out what your team can tackle in the next sprint, GitLab gives you:

* [Issue boards](https://docs.gitlab.com/user/project/issue_board/) that provide a comprehensive view of your backlog
* [Total weight](https://docs.gitlab.com/user/project/issue_board/#sum-of-issue-weights) of user stories displayed directly on boards
* The ability to easily move issues between iterations
* A collapsible view that simplifies moving stories between sprints

This means you can keep everything in one place and spend your planning meetings actually planning instead of jumping between tools.

![Sprint planning with GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097576751.gif)

*💡 Check out [this tutorial on using GitLab to facilitate Scrum](https://docs.gitlab.com/tutorials/scrum_events/) for a detailed glimpse into the power of GitLab in Agile planning and sprint tracking.*

### Daily stand-ups

Your team can gather around the board during daily stand-ups and actually see what everyone's working on, what's stuck, and what's ready for review – all in one view. For your dev team's daily stand-ups, GitLab lets you:

* Create [iteration-scoped](https://docs.gitlab.com/user/project/issue_board/#iteration-lists) boards that show the current sprint's work
* Display story points/weights directly on cards
* Use the [drawer view](https://docs.gitlab.com/user/project/issues/managing_issues/#open-issues-in-a-drawer) to access details without leaving the context
* Highlight tasks at risk through [health status](https://docs.gitlab.com/user/project/issues/managing_issues/#health-status)

![Daily stand-up board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097576755.png)

### Sprint review

Want to know how your team is doing over time? GitLab provides comprehensive metrics with:

* [Burndown and burnup charts](https://docs.gitlab.com/user/group/iterations/#iteration-burndown-and-burnup-charts) for iterations
* Velocity tracking
* [Lead and cycle time](https://docs.gitlab.com/user/group/value_stream_analytics/#lifecycle-metrics) metrics
* Dashboards that can be scoped to teams

These metrics help you understand if your team is getting faster, where they're getting stuck, and what you might want to talk about in your next retrospective.

![Burndown and burnup charts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097576758.png)

## 5 reasons a unified platform provides an advantage

I know there are plenty of planning tools that can handle SAFe ceremonies. But there are game-changing reasons why I genuinely believe GitLab is different:

1. **No more context switching** - Your planning, coding, testing, and security all happen in one place.
2. **Everything's connected** - You can trace work from the big epic down to the code and deployment.
3. **Everyone's on the same page** - Developers, product folks, and security teams all work together in the same tool.
4. **Total visibility** - Stakeholders have one place to check for updates.
5. **The full picture** - You see planning and development metrics together, so you know what's really going on.

If your dev teams already love GitLab, why make them jump to another tool for planning or create some complex, cobbled-together integrations? Bringing your SAFe planning into GitLab creates a much smoother experience for everyone.

## Implementation principles

I've worked with teams transitioning from traditional SAFe tools to GitLab, and here's what I've learned: Focus on **what each ceremony is trying to accomplish**, not on recreating exact replicas of your old tools.

The teams that get the most out of GitLab are the ones who embrace its native capabilities instead of fighting against them. Yes, it takes some initial work to figure out how to map your SAFe concepts and set up your workflows. But once you do, you'll find your processes actually get simpler rather than more complex.

The key is defining conventions that everyone follows. Which labels mean what? How will you track teams? What goes in an epic versus an issue? With a little upfront investment in these decisions, you'll end up with an intuitive system that eliminates all that cross-tool coordination overhead.

## Getting started

Ready to give this a shot? Here's how to start implementing SAFe in GitLab:

1. **Set up your structure** - Create groups and subgroups that [match your organization](https://about.gitlab.com/blog/best-practices-to-set-up-organizational-hierarchies-that-scale/).
2. **Define your work breakdown** - Decide how you'll use [epics](https://about.gitlab.com/blog/unlocking-agile-excellence-gitlab-epics-for-seamless-portfolio-management/), [issues](https://docs.gitlab.com/user/project/issues/managing_issues/), and [tasks](https://docs.gitlab.com/user/tasks/).
3. **Create your iterations** - Set up your [sprint schedule](https://docs.gitlab.com/user/group/iterations/#create-an-iteration-cadence).
4. **Add your milestones** - [Milestones](https://docs.gitlab.com/user/project/milestones/#create-a-milestone) will represent your Program Increments in GitLab.
5. **Build your boards** - Create different views for different ceremonies.
6. **Agree on conventions** - Document how you'll use labels and custom fields (a label-convention sketch follows this list).
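One way to pin down the team-tracking convention from step 6 is to create scoped `team::` labels once at the group level so every project underneath inherits them. A sketch, assuming the same illustrative `acme-art` group; team names and colors are made up:

```python
# Sketch: create scoped "team::" labels once at the ART group level
# so every project underneath inherits the same convention.
# Group path, team names, and colors are illustrative.
import gitlab

gl = gitlab.Gitlab("https://gitlab.com", private_token="glpat-...")
art = gl.groups.get("acme-art")

teams = {"checkout": "#6699cc", "payments": "#cc6666", "platform": "#66cc99"}
existing = {label.name for label in art.labels.list(iterator=True)}

for team, color in teams.items():
    name = f"team::{team}"
    if name not in existing:  # keep the script idempotent
        art.labels.create({"name": name, "color": color})
```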
Taking time to think through these decisions upfront will save you many headaches later. And remember, you don't have to perfect it on day one - you can always adjust as you learn.

## Bringing it all together

GitLab gives you a solid foundation for running SAFe, especially if your dev teams are already GitLab fans. When you bring planning and development into the same tool, you eliminate those painful handoffs, make collaboration way easier, and get everything moving faster.

The beauty of GitLab's planning tools is that they're flexible enough to adapt to your specific flavor of SAFe. You're not locked into rigid workflows - you can evolve your approach as your teams mature and your needs change.

> Ready to see how much better life is without those planning silos? [Start your free trial today](https://about.gitlab.com/free-trial/) and experience firsthand how GitLab can transform your SAFe implementation.

*💡 If you liked this topic, check out this related post: [GitLab for Agile Software Development](https://about.gitlab.com/blog/gitlab-for-agile-software-development/)*

---

# How to harmonize Agile sprints with product roadmaps

**Amanda Rueda** · February 4, 2025 · Agile Planning

Apply best practices and GitLab features to your product journey, including creating centralized roadmaps, conducting review sessions, and tracking sprint lifecycles.

Picture this: Product and Development teams are working in isolation. Product has created a 12-month roadmap and communicated it to internal stakeholders but didn't review it with their development team. Dev starts building the features planned for the upcoming sprint without considering the broader product roadmap, leading to missed opportunities to optimize timing, like running projects in parallel, accounting for team capacity, or building reusable APIs that could serve multiple initiatives. The lack of coordination results in inefficiencies and delayed value delivery.

Balancing short-term wins with long-term vision isn’t easy; it requires clear communication, aligned priorities, and the right tools. In this guide, you'll learn strategies to help harmonize your Agile sprints with strategic roadmaps, tackle common challenges, and uncover actionable solutions tailored to your teams.

## The importance of a single source of truth

A consistent single source of truth for roadmaps with longer-range goals ensures you and your teams have access to up-to-date information about the bigger picture. In practice, this means maintaining a single, regularly updated platform where all roadmap details reside, rather than keeping versions of the roadmap across multiple formats, each typically with slightly different information, which causes a misaligned understanding of where you're headed.

### Create a centralized roadmap

By creating a centralized roadmap for your team, you can:

* communicate long-range strategy
* minimize miscommunication
* facilitate cross-functional alignment
* quickly adapt to changes without losing context
* self-serve information, reducing dependency on a single point of contact who retains the information

***GitLab tip**: Use [epics](https://docs.gitlab.com/ee/user/group/epics/) and the [Roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/) to support both product planning and the transparent monitoring of delivery. The Roadmap view allows you to track progress, identify bottlenecks, and ensure alignment between high-level goals and sprint-level execution.*

![Roadmap view for group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097239117.png)

## Collaborative roadmap review practices

Establish a regular review and sign-off process for roadmap updates that includes Product, Engineering, and UX as part of the [product trio](https://www.producttalk.org/product-trio/). Collaborative reviews help you maintain alignment and minimize risk. At GitLab, I meet with my engineering manager and UX designer monthly to review and obtain sign-offs on any changes. We maintain a running sign-off on the roadmap wiki page itself that holds us accountable for keeping the schedule and provides transparency to the rest of the organization.

### How to extract value from review sessions

To make the most of the review session, aim for the following best practices:

* Schedule routine reviews, monthly or quarterly, depending on how frequently the roadmap tends to fluctuate at your organization.
* Validate alignment between product goals, UX lead time, and technical feasibility by discussing potential risks and dependencies upfront.
  * Validate that the roadmap reflects current organizational business objectives.
  * Ensure that design timelines are realistic and consider research or validation needs.
  * Confirm that the roadmap allocates time for technical preparation, such as technical spikes or investigations, and ensures alignment with broader engineering priorities.
* Optimize team utilization by considering capacity constraints and ensuring the sequence of work aligns with the team’s skill profile. This includes avoiding periods of underutilization or skill mismatches while effectively planning for situations like staffing level drops during holidays.
* Right-size scope and set appropriate expectations about what can be achieved. We all want to do it all, but perfection is the enemy of progress, so prioritize what truly matters to deliver incremental value efficiently. Seek opportunities to optimize by identifying ways to iterate or increase velocity, such as adjusting the order of work to reduce dependencies or leveraging reusable components to streamline development.
* Encourage open dialogue about trade-offs and priorities to ensure all perspectives are considered. This collaborative approach helps identify creative solutions to challenges and builds consensus on the best path forward.

***GitLab tip**: Use a [GitLab Wiki](https://docs.gitlab.com/ee/user/project/wiki/) page to complement the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature. In the wiki, you can include expanded context about your product roadmap, such as business rationale, links to user research, RICE scores, and details about dependencies or risks. Link directly to the roadmap for easy access, and leverage the upcoming discussion threads feature to encourage async collaboration and feedback from your team.*

![PlanFlow product roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097239118.png)

## Continuous direction validation and progress measurement

The goal of a product roadmap isn’t just to stay on track – it’s to deliver real value to your customers. To make space for sharing ongoing user feedback and behavioral data, consider incorporating regular touchpoints across your product trio outside of sprint cycles. These sessions can be used to review insights, analyze trends, and ensure that the product roadmap continues to reflect the evolving needs of your users. By grounding roadmap updates in real user insights, you’re not only delivering on outcomes but also adapting to what really matters to your customers.

The value you ship might come in the form of improved usability, reduced technical debt, or entirely new capabilities. When the product trio is aligned on the roadmap vision, they’re also aligned on the outcomes you’re working to achieve.

To measure whether you’re on track to deliver those outcomes, you need to closely scope the intended results. Scope creep, like late user story additions, can delay your ability to ship value. Additionally, it’s important to identify work that was delivered but doesn’t align with the roadmap and understand why.

### Sprint planning

Remaining aligned with your product roadmap starts with thoughtful sprint planning. Here are some best practices to keep your team on track and focused on delivering value:

* Clearly define, and narrowly scope, desired outcomes to ensure high confidence in delivery.
* Identify potential late additions or adjustments that could delay delivery, and build in buffers to maintain focus.
* Align on the sequence of work with your team to optimize for capacity, skill profiles, and reducing dependencies.
* To maintain focus and improve confidence in delivering on time, avoid planning to 100% of the team’s capacity. Leave room (10%-20%) for unknowns or new discoveries that may surface during the sprint (a quick capacity check is sketched below).
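The buffer guidance is easy to make concrete. A tiny, illustrative capacity check; the velocity and story weights are made-up numbers:

```python
# Sketch: check a sprint plan against capacity with a 15% buffer.
# Velocity and story weights are illustrative numbers.
TEAM_VELOCITY = 40          # average completed weight per sprint
BUFFER = 0.15               # keep 10%-20% free for unknowns

planned_stories = {"Refund API endpoint": 8, "Refund UI": 5,
                   "Webhook retries": 13, "Docs update": 3, "Audit log": 8}

capacity = TEAM_VELOCITY * (1 - BUFFER)
planned = sum(planned_stories.values())

print(f"Capacity with buffer: {capacity:.0f}, planned: {planned}")
if planned > capacity:
    print("Over capacity: move the lowest-priority stories to the next iteration.")
```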
While delivering value is the goal, it’s equally important to ensure the work in progress aligns with the outcomes you’ve scoped and planned.\n\n* Continuously validate the work in progress against roadmap outcomes to ensure every sprint contributes to the bigger picture.\n* Encourage the team to regularly check if they’re still working toward the intended goals and outcomes.\n* Maintain open communication throughout the sprint. Use daily standups or async updates to surface risks, unplanned work, or dependencies early and adjust where necessary.\n* Be ruthless about protecting the sprint. While the urge to solve emerging problems is natural, unplanned work should be carefully evaluated to avoid derailing agreed-upon priorities.\n* Proactively manage scope creep. If new work surfaces mid-sprint, assess whether it aligns with the current roadmap outcome’s narrowly scoped focus. While additional ideas or features may align conceptually with the broader outcome, they may not fit into the immediate plan to deliver value as soon as possible. Document these suggestions and evaluate if they should be considered as part of future iterations or as a nice-to-have for the future, rather than introducing them into the current sprint and delaying agreed-upon priorities.\n\n### Sprint retros\n\nIn your sprint retrospectives, take time to reflect with your team on how well you are collectively progressing toward your desired outcomes. Questions to ask:\n\n* Did any unplanned work get introduced during the sprint that delayed your ability to deliver value? Identify why it happened and what adjustments can be made.\n* Did you deliver any work that deviated from the roadmap? Discuss what led to this and what you can learn for future planning.\n\nFrom sprint planning through retrospectives, staying focused on delivering tangible outcomes to users and stakeholders is a team responsibility. By aligning every step of the way, you ensure that your roadmap remains a clear guide for delivering value efficiently and consistently.\n\n***GitLab tip:** Use [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to visualize progress and detect deviations early, helping your team stay focused on delivering outcomes.*\n\n![Burndown chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097239120.png)\n\n## Delivering roadmap outcomes with confidence\n\nHarmonizing Agile sprints with strategic roadmaps requires intentionality, team buy-in, and the proper tools. By creating a roadmap single source of truth, fostering collaborative reviews, and measuring progress towards outcomes, you can align execution with vision. With GitLab’s robust planning features, teams can turn challenges into opportunities for innovation and growth.\n\nReady to align your sprints with your strategic roadmap? 
[Start a free trial of GitLab](https://about.gitlab.com/free-trial/) today and explore the tools that can help you deliver outcomes with confidence.\n\n## Learn more\n\n- [Agile planning content hub](https://about.gitlab.com/blog/categories/agile-planning/)\n- [GitLab’s new Planner role for Agile planning teams](https://about.gitlab.com/blog/introducing-gitlabs-new-planner-role-for-agile-planning-teams/)\n- [Get to know the GitLab Wiki for effective knowledge management](https://about.gitlab.com/blog/get-to-know-the-gitlab-wiki-for-effective-knowledge-management/)",[840,857,841,542],{"slug":870,"featured":91,"template":844},"how-to-harmonize-agile-sprints-with-product-roadmaps",{"category":732,"slug":736,"posts":872},[873,886,898],{"content":874,"config":884},{"title":875,"description":876,"authors":877,"heroImage":879,"date":880,"body":881,"category":736,"tags":882},"Modernize Java applications quickly with GitLab Duo with Amazon Q","Transform legacy Java 8 applications to Java 17 in minutes instead of weeks using AI-powered automation.",[878],"Cesar Saavedra","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","2025-10-22","Upgrading applications to newer, supported versions of Java has traditionally been a tedious and time-consuming process. Development teams must spend countless hours learning about deprecated APIs, updated libraries, and new language features. In many cases, significant code rewrites are necessary, turning what should be a straightforward upgrade into a multi-week project that diverts resources from building new features.\n\n[GitLab Duo with Amazon Q](https://about.gitlab.com/gitlab-duo/duo-amazon-q/) changes this paradigm entirely with AI-powered automation. What once took weeks can now be accomplished in minutes, with full traceability and ready-to-review merge requests that maintain your application's functionality while leveraging modern Java features.\n\n## How it works: Upgrade your Java application\n\nLet's walk through how you can modernize a Java 8 application to Java 17.\n\n**Start with an issue**\n\nFirst, create an issue in your GitLab project describing your modernization goal. You don't need to specify version details - GitLab Duo with Amazon Q is able to detect that your application is currently built with Java 8 and needs to be upgraded. Simply describe that you want to refactor your code to Java 17 in the issue title and description.\n\n**Trigger the transformation**\n\nOnce your issue is created, invoke GitLab Duo with Amazon Q using the `/q transform` command in a comment on the issue. This simple command sets in motion an automated process that will analyze your entire codebase, create a comprehensive upgrade plan, and generate all necessary code changes.\n\n**Automated analysis and implementation**\n\nBehind the scenes, Amazon Q analyzes your Java 8 codebase to understand your application's structure, dependencies, and implementation patterns. It identifies deprecated features, determines which Java 17 constructs can replace existing code, and creates a merge request with all the necessary updates. The transformation updates not just your source code files — including CLI, GUI, and model classes — but also your build configuration files like `pom.xml` with Java 17 settings and dependencies.\n\n**Review and verification**\n\nThe generated merge request provides a complete view of all changes. 
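As a purely illustrative sketch (hypothetical code with invented class and method names, not actual output from GitLab Duo with Amazon Q), the kind of change you might see in such a merge request replaces verbose Java 8 constructs with Java 17 language features:\n\n```java\n// Hypothetical illustration of a Java 8 to Java 17 modernization; not tool output.\npublic class ModernizationDemo {\n\n    // Before (Java 8 style): a verbose switch statement assigning to a local variable.\n    static int quarterLengthLegacy(int month) {\n        int days;\n        switch (month) {\n            case 1: case 2: case 3:\n                days = 90; break;\n            case 4: case 5: case 6:\n                days = 91; break;\n            default:\n                days = 92; break;\n        }\n        return days;\n    }\n\n    // After (Java 17 style): a switch expression returns the value directly.\n    static int quarterLengthModern(int month) {\n        return switch (month) {\n            case 1, 2, 3 -> 90;\n            case 4, 5, 6 -> 91;\n            default -> 92;\n        };\n    }\n\n    // After (Java 17 style): a record replaces a hand-written immutable data class.\n    record Quarter(int index, int days) { }\n\n    public static void main(String[] args) {\n        System.out.println(quarterLengthLegacy(2));\n        System.out.println(quarterLengthModern(2));\n        System.out.println(new Quarter(1, quarterLengthModern(2)));\n    }\n}\n```\n\n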
You can review how your code has been modernized with Java 17 language features and verify that all tests still pass. The beauty of this approach is that all functionality is preserved and your application works exactly the same way, just with improved, more modern code.\n\n## Why use GitLab Duo with Amazon Q\n\nLeveraging GitLab Duo with Amazon Q for application modernization has a number of advantages for development teams:\n\n**Time reduction**: What traditionally takes weeks of developer effort is reduced to hours or minutes, freeing your team to focus on building new features rather than managing technical debt.\n\n**Minimized risk**: The automated analysis and transformation process reduces the risk of human error that often accompanies manual code migrations. Every change is traceable and reviewable through GitLab's merge request workflow.\n\n\n**Complete audit trail**: Every transformation is documented through GitLab's version control, providing a clear record of what changed and why, which is essential for compliance and troubleshooting.\n\n**Enterprise-grade security**: The integration leverages GitLab's end-to-end security features and AWS's robust cloud infrastructure, helping to ensure your code and data remain protected throughout the modernization process.\n\nAre you ready to see GitLab Duo with Amazon Q in action? Watch our complete walkthrough video demonstrating the Java modernization process from start to finish: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/qGyzG9wTsEo?si=47JnSb6flOgZAJcR\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit our [web site](https://about.gitlab.com/gitlab-duo/duo-amazon-q/) or reach out to your GitLab representative.\n\n## Read more\n\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)  \n- [GitLab Duo with Amazon Q: DevSecOps meets agentic AI](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-devsecops-meets-agentic-ai/)  \n- [More GitLab Duo with Amazon Q tutorials](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/#gitlab-duo-with-amazon-q-tutorials)\n\n",[732,883,857,812,856],"AWS",{"featured":6,"template":844,"slug":885},"modernize-java-applications-quickly-with-gitlab-duo-with-amazon-q",{"content":887,"config":896},{"heroImage":888,"title":889,"description":890,"authors":891,"date":893,"body":894,"category":736,"tags":895},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1760970883/asrc2c2hejqp5o1tan4c.png","GitLab 18.5: Intelligence that moves software development forward","GitLab 18.5 delivers new specialized agents, security insights that cut through the noise, and a reimagined interface that keeps your AI teammate always in view.",[892],"Bill Staples","2025-10-21","Software development teams are drowning in noise. Thousands of vulnerabilities flood security dashboards, but only a fraction pose real risk. Developers context-switch between planning backlogs, triaging security findings, reviewing code, and responding to CI/CD failures — losing hours to manual work. [GitLab 18.5](https://about.gitlab.com/releases/2025/10/16/gitlab-18-5-released/) calms this chaos.\n\nAt the heart of this release is a valuable improvement in overall usability of GitLab and how AI integrates into your user experience. 
A new panel-based UI makes it easier to see data in context and allows GitLab Duo Chat to be persistently visible across the platform, wherever it is needed. Purpose-built agents tackle vulnerability triage and backlog management, and popular AI tools integrate with agentic workflows even more seamlessly than before. We’ve also extended our market-leading security capabilities to help you better identify exploitable vulnerabilities versus theoretical ones, distinguish active credentials from expired ones, and scan only changed code to keep developers in flow.\n\n## What’s new in 18.5\n\n18.5 represents our biggest release so far this year — watch our introduction to the release, and read more details below. \n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1128975773?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab_18.5 Release_101925_MP_v2\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\n### Modern user experience with quick access to GitLab Duo everywhere\n\nGitLab 18.5 improves the GitLab user experience with a more usable, intuitive interface driven by a new panel-based layout. \n\nPanels present information side by side, allowing you to work more contextually. When you click on an issue in the issues list, you will see the details in its side panel. You can then open the GitLab Duo Chat panel on the right side of the interface as an on-demand assistant, allowing you to engage your agents with contextual questions and instructions from anywhere in the GitLab experience. Other subtle but usability-driven improvements include moving the global search box to the top center for improved accessibility, while global navigation elements — including My Issues, Merge Requests, To-Dos, and the user icon — relocate to the top right. The left navigation menu now collapses and expands to provide flexible sidebar management.\n\nThe panel UI will be \"default-off\" in GitLab 18.5, with an opt-in toggle available under your user icon. To learn more about how to enable or disable this feature, reference the documentation [here](https://docs.gitlab.com/user/interface_redesign/#turn-new-navigation-on-or-off). Please share your feedback and file bugs on anything you don’t love! Our engineers are listening. Assuming you love the experience as much as our own team, this toggle is expected to be removed in 18.6, making the panel UI standard across all user experiences. \n\n### Updates to GitLab Duo Agent Platform\n\n**Security Analyst Agent: Transform manual vulnerability triage into intelligent automation**\n\nGitLab Duo [Security Analyst Agent](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/security_analyst_agent/) automates vulnerability management workflows through AI-powered analysis, helping transform hours of manual triage into intelligent automation. 
Building on the Vulnerability Management Tools available through GitLab Duo Agentic Chat, Security Analyst Agent orchestrates multiple tools, applies security policies, and creates custom flows for recurring workflows automatically.\n\nSecurity teams can access enriched vulnerability data, including CVE details, static reachability analysis, and code flow information, while executing operations like dismissing false positives, confirming threats, adjusting severity levels, and creating linked issues for remediation — all through conversational AI. The agent reduces repetitive clicking through vulnerability dashboards and replaces custom scripts with simple natural language commands.\n\nFor example, when a security scan reveals dozens of vulnerabilities, simply prompt: \"Dismiss vulnerabilities with reachable=FALSE and create issues for critical findings.\" Security Analyst Agent analyzes reachability data, applies security policies, and completes bulk operations in moments — helping decrease work that would otherwise take hours. \n\nWhile individual Vulnerability Management Tools can be accessed directly through Agentic Chat for specific tasks, Security Analyst Agent orchestrates these tools intelligently and automates complex multi-step workflows. Note that Vulnerability Management Tools are available through Agentic Chat on GitLab Self-managed and GitLab.com instances, and Security Analyst Agent is available on GitLab.com only for 18.5, while availability in Self-managed and Dedicated environments will come with our next release.\n\nWatch this demo:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1128975984?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"18.5 Security Demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\n**GitLab Duo Planner: Turn backlog chaos into strategic clarity**\n\nManaging complex software delivery requires constant context-switching between planning tasks. [GitLab Duo Planner](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/planner/) addresses the real-world planning challenges we see teams face every day. Duo Planner acts as your teammate with awareness of your project context, including how you manage issues, epics, and merge requests. Unlike generic AI assistants, it's purpose-built with deep knowledge of GitLab's planning workflows coupled with Agile and prioritization frameworks to help you balance effort, risk, and strategic alignment.\n\nGitLab Duo Planner can turn vague ideas into structured planning hierarchies, identify stale backlog items, and draft executive updates. For example, when refining your backlog with hundreds of issues accumulated over months, simply prompt: \"Identify stale backlog items and suggest priorities.\" Within seconds, you'll receive a structured summary showing issues without recent activity, items missing key details, duplicate work, and recommended priorities based on labels and milestones, complete with actionable recommendations. 
\n\nFor teams managing complex roadmaps, the Planner aims to eliminate hours of manual analysis and context-switching, helping Product Managers and engineering leads make faster, more informed decisions. As of 18.5, GitLab Duo Planner is currently “read-only,” meaning that it can analyze, plan, and suggest, but cannot yet take direct action to modify anything. Please see our [documentation](https://docs.gitlab.com/user/duo_agent_platform/agents/foundational_agents/planner/) for more information. \n\n**Extensible Agent Catalog: Popular AI tools as native GitLab agents**\n\nGitLab 18.5 introduces popular AI agents directly into the [AI Catalog](https://docs.gitlab.com/user/duo_agent_platform/ai_catalog/), making external tools like Claude, OpenAI Codex, Google Gemini CLI, Amazon Q Developer, and OpenCode available as native GitLab agents. Users can now discover, configure, and deploy these agents through the same unified catalog interface used for GitLab's built-in agents, with automatic syncing of foundational agents across organization catalogs. \n\nThis eliminates the complexity of manual agent setup by providing a point-and-click catalog experience while maintaining enterprise-grade security through GitLab's authentication and audit systems. GitLab Duo Enterprise subscriptions now include built-in usage of Claude and Codex within GitLab, allowing you to use your existing GitLab subscription for these tools without requiring separate API keys or additional billing setup. Other agents may still require separate subscriptions and configuration while we finalize our integration plans.\n\n**Self-hosted GitLab Duo Agent Platform (Beta): Address data sovereignty requirements without sacrificing AI power**\n\nGitLab 18.5 moves GitLab Duo Agent Platform's self-hosted capabilities from experimental to beta, enabling organizations to execute AI agents and flows entirely within their own infrastructure — critical for regulated industries and data sovereignty requirements. The beta release includes improved timeout configurations and AI Gateway settings, allowing teams to use AI agents for code reviews, bug fixes, and feature implementations, while providing enterprise-grade security for sensitive code.\n\n## Smarter, faster security: Prioritize real risks and keep developers in the flow\n\nGitLab 18.5 introduces new application security capabilities that help teams focus on exploitable risk, reduce noise, and strengthen software supply chain security. These updates continue our commitment to building security directly into the development process — delivering precision, speed, and insight without disrupting developer flow.\n\n**Static Reachability Analysis**\n\nWith over [37,000 new CVEs](https://www.cvedetails.com/) issued this year, security teams face an overwhelming volume of vulnerabilities and struggle to understand which ones are truly exploitable. Static Reachability Analysis, now in limited availability, brings library-level precision by helping to identify whether vulnerable code is actually invoked in your application, not just present in dependencies. \n\nPaired with our [recently released](https://docs.gitlab.com/user/application_security/vulnerabilities/risk_assessment_data/) Exploit Prediction Scoring System (EPSS) and Known Exploited Vulnerability (KEV) data, security teams can more effectively accelerate vulnerability triage and prioritize real risks to help strengthen overall supply chain security. 
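\n\nTo make the idea concrete, here is a minimal, hypothetical Java sketch (invented class and method names, not real scanner output or a real CVE) of the distinction reachability analysis draws: a vulnerable dependency method that your application actually calls is a genuine risk, while one that merely ships inside the dependency is a much lower priority.\n\n```java\n// Hypothetical illustration of reachability; not real scanner output.\n// LegacyParser stands in for a third-party dependency bundled with the application.\nclass LegacyParser {\n    static int parseSafely(int raw) {\n        return Math.max(raw, 0);\n    }\n\n    // Imagine a known vulnerability is reported against this method.\n    static int parseUnsafely(int raw) {\n        return 100 / raw; // throws ArithmeticException when raw is 0\n    }\n}\n\npublic class ReachabilityDemo {\n    public static void main(String[] args) {\n        // Reachable: the application actually invokes this code path.\n        System.out.println(LegacyParser.parseSafely(-5));\n\n        // parseUnsafely() is never called anywhere in this codebase, so a finding\n        // against it is present in the dependency but not reachable from the app.\n    }\n}\n```\n\n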
In 18.5, we’re adding support for Java, alongside existing support for Python, JavaScript, and TypeScript. \n\n**Secret Validity Checks**\n\nJust as Static Reachability Analysis helps teams prioritize exploitable vulnerabilities from open source dependencies, Secret Validity Checks bring the same insight to exposed secrets — currently available in beta on GitLab.com and GitLab Self-Managed. For GitLab-issued security tokens, instead of manually verifying whether a leaked credential or API key is active, GitLab automatically distinguishes active secrets from expired ones directly in the [Vulnerability Report](https://docs.gitlab.com/user/application_security/vulnerability_report/). This helps security and development teams focus remediation efforts on genuine risks. Support for AWS- and GCP-issued secrets is planned for future releases. \n\n**Custom rules for Advanced SAST**\n\nAdvanced SAST runs on rules informed by our in-house security research team, designed to maximize accuracy out of the box. However, some teams require additional flexibility to tune the SAST engine for their specific organization. With Custom Rules for Advanced SAST, AppSec teams can define atomic, pattern-based detection logic to help capture security issues specific to their organization — like flagging banned function calls — while still using GitLab’s curated ruleset as the baseline. Customizations are managed through simple TOML files, just like other SAST ruleset configurations. While these rules will not support taint analysis, they do give organizations greater flexibility in achieving accurate SAST results. \n\n**Advanced SAST C and C++ language support** \n\nWe’re expanding our language coverage for Advanced SAST to include C and C++, which are widely used languages in embedded systems software development. To enable scanning, projects must generate a compilation database that captures the compiler commands and include paths used during builds. This ensures the scanner can accurately parse and analyze source files, delivering precise, context-aware results that help security teams identify real vulnerabilities in the development process. The implementation for C and C++ requires specific configuration, which can be found in our [documentation](https://docs.gitlab.com/user/application_security/sast/cpp_advanced_sast/). Advanced SAST C and C++ support is currently available in beta. \n\n**Diff-based SAST scanning** \n\nTraditional SAST scans re-analyze entire codebases with every commit, slowing pipelines and disrupting developer flow. The developer experience is a critical consideration that can make or break the adoption of application security testing. Diff-based SAST scanning aims to speed up scan times by focusing only on the code changed in a merge request, reducing redundant analysis and surfacing relevant results tied to the developer’s work. By aligning scans with actual code changes, GitLab delivers faster, more focused feedback that helps keep developers in flow while maintaining strong security coverage.\n\n## Simplify API configurations\n\nAPI-driven workflows offer power and flexibility, but they can also create unnecessary complexity for tasks that teams need to perform regularly. 
The new Maven Virtual Registry interface brings a UI layer to these operations.\n\n### Maven Virtual Registry interface\n\nThe new web-based interface for managing Maven Virtual Registries turns complex API configurations into visual simplicity, providing a more intuitive experience for package administrators and platform engineers.\n\nPreviously, teams configured and maintained virtual registries only through API calls, which made routine maintenance time-consuming and required specialized platform knowledge. The new interface removes that barrier, helping to make everyday tasks faster and easier.\n\nWith this update, you can now:\n\n* Create virtual registries to simplify dependency configuration  \n* Create and order upstreams to help improve performance and compliance  \n* Browse and clear stale cache entries directly in the UI\n\nThis visual experience helps reduce operational overhead and provides development teams with clearer insight into how dependencies are resolved, enabling them to make better decisions about build performance and security policies.\n\nWatch a demo:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/CiOZJPhAvaI?si=cYaoR_OIgqFKbyM2\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cp>\u003C/p>\n\nWe invite enterprise customers to join the [Maven Virtual Registry Beta program](https://gitlab.com/gitlab-org/gitlab/-/issues/543045) and share feedback to help shape the final release. \n\n## AI that adapts to your workflow\n\nThis release represents more than new capabilities — it's about choice and control. Watch the walkthrough video here:\n\n\u003Cp>\u003C/p>\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1128992281?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"18.5-tech-demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\nGitLab Premium and Ultimate users can start using these capabilities today on [GitLab.com](https://GitLab.com) and self-managed environments, with availability for GitLab Dedicated customers planned for next month. \n\nGitLab Duo Agent Platform is currently in **beta** — enable beta and experimental features to experience how full-context AI can transform the way your teams build software. New to GitLab? [Start your free trial](https://about.gitlab.com/free-trial/devsecops/) and see why the future of development is AI-powered, secure, and orchestrated through the world’s most comprehensive DevSecOps platform.\n\n***Note:** Platform capabilities that are in beta are available as part of the GitLab Beta program. They are free to use during the beta period, and when generally available, they will be made available with a paid add-on option for GitLab Duo Agent Platform.*\n\n### Stay up to date with GitLab\n\nTo make sure you’re getting the latest features, security updates, and performance improvements, we recommend keeping your GitLab instance up to date. 
The following resources can help you plan and complete your upgrade:\n\n* [Upgrade Path Tool](https://gitlab-com.gitlab.io/support/toolbox/upgrade-path/) – enter your current version and see the exact upgrade steps for your instance  \n* [Upgrade Documentation](https://docs.gitlab.com/update/upgrade_paths/) – detailed guides for each supported version, including requirements, step-by-step instructions, and best practices\n\nBy upgrading regularly, you’ll ensure your team benefits from the newest GitLab capabilities and remains secure and supported.\n\nFor organizations that want a hands-off approach, consider [GitLab’s Managed Maintenance service](https://content.gitlab.com/viewer/d1fe944dddb06394e6187f0028f010ad#1). With Managed Maintenance, your team stays focused on innovation while GitLab experts keep your Self-Managed instance reliably upgraded, secure, and ready to lead in DevSecOps. Ask your account manager for more information. \n\n*This blog post contains \"forward‑looking statements\" within the meaning of Section 27A of the Securities Act of 1933, as amended, and Section 21E of the Securities Exchange Act of 1934. Although we believe that the expectations reflected in these statements are reasonable, they are subject to known and unknown risks, uncertainties, assumptions and other factors that may cause actual results or outcomes to differ materially. Further information on these risks and other factors is included under the caption \"Risk Factors\" in our filings with the SEC. We do not undertake any obligation to update or revise these statements after the date of this blog post, except as required by law.*",[856,812,542],{"featured":91,"template":844,"slug":897},"gitlab-18-5-intelligence-that-moves-software-development-forward",{"content":899,"config":908},{"title":900,"description":901,"authors":902,"heroImage":904,"date":905,"body":906,"category":736,"tags":907},"Claude Haiku 4.5 now available in GitLab Duo Agentic Chat","Anthropic's fastest model with near-frontier coding performance is a model option for GitLab Duo Pro, Duo Enterprise, and Duo Agent Platform.",[903],"Tim Zallmann","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097183/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097183481.png","2025-10-20","GitLab now offers Claude Haiku 4.5, Anthropic's fastest model combining high intelligence with exceptional speed, directly in the GitLab Duo model selector.\n\nUsers have the flexibility to choose Claude Haiku 4.5 alongside other leading models, enhancing their GitLab Duo experience with near-frontier performance at remarkable speed. With strong performance on [SWE-bench Verified (73.3%)](https://www.anthropic.com/news/claude-haiku-4-5) and more than 2x the speed of Claude Sonnet 4.5, GitLab users can apply Claude Haiku 4.5 to accelerate their development workflows with rapid, intelligent responses.\n\n## GitLab Duo Agent Platform + Claude Haiku 4.5\n\n[GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) extends the value of Claude Haiku 4.5 by enabling multi-agent orchestration, where Claude Haiku 4.5 can serve as a fast sub-agent executing parallel tasks while more powerful models handle high-level planning. This combination creates efficient agentic workflows, where speed meets intelligence across the software development lifecycle. 
The result is faster iterations, cost-effective AI assistance, and responsive experiences, all delivered inside the GitLab workflow developers already use every day.\n\n## Where you can use Claude Haiku 4.5\n\nClaude Haiku 4.5 is now available as a model option in GitLab Duo Agent Platform Agentic Chat on GitLab.com. You can choose Claude Haiku 4.5 from the model selection dropdown to leverage its speed and coding capabilities for your development tasks.\n\n**Note:** Ability to select Claude Haiku 4.5 in supported IDEs will be available soon.\n\nKey capabilities:\n\n* **Superior coding performance:** Achieves 73% on SWE-bench Verified, matching the intelligence level of models that were cutting-edge just months ago.  \n* **Lightning-fast responses:** More than 2x faster than Sonnet 4.5, perfect for real-time pair programming.  \n* **Enhanced computer use:** Outperforms Claude Sonnet 4 at autonomous task execution.  \n* **Context awareness:** First Haiku model with native context window tracking for better task persistence.  \n* **Extended thinking:** Pause and reason through complex problems before generating responses.\n\n## Get started today\n\nGitLab Duo Pro and Enterprise customers can access Claude Haiku 4.5 today. Visit our [documentation](https://docs.gitlab.com/user/gitlab_duo/) to learn more about GitLab Duo capabilities and models.\n\nQuestions or feedback? Share your experience with us through the GitLab community.\n\n> Want to try GitLab Ultimate with Duo Enterprise? [Sign up for a free trial today.](https://about.gitlab.com/gitlab-duo/)\n## Read more\n- [Greater AI choice in GitLab Duo: Claude Sonnet 4.5 arrives](https://about.gitlab.com/blog/greater-ai-choice-in-gitlab-duo-claude-sonnet-4-5-arrives/)\n- [GitLab 18.4: AI-native development with automation and insight](https://about.gitlab.com/blog/gitlab-18-4-ai-native-development-with-automation-and-insight/)\n- [GitLab Duo Chat gets agentic AI makeover](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/)",[732,856,812],{"featured":91,"template":844,"slug":909},"claude-haiku-4-5-now-available-in-gitlab-duo-agentic-chat",{"category":744,"slug":748,"posts":911},[912,924,935],{"content":913,"config":922},{"title":914,"description":915,"authors":916,"heroImage":918,"date":919,"body":920,"category":748,"tags":921},"Improving GitLab's deletion flow: What to expect in coming months","GitLab is enhancing its deletion flow for groups and projects with features like pending deletion, self-service recovery, and an extended 30-day recovery window. Here's what you need to know.\n",[917],"Christina Lohr","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","2025-07-14","At GitLab, we're committed to continuously improving your experience across our platform. Today, we're excited to announce significant enhancements to our deletion flow for groups and projects. We are rolling out a series of improvements designed to protect your data, simplify recovery, and create a more intuitive experience across all pricing tiers.\n\n## Why we're making these changes\n\nOur current deletion flow has some inconsistencies that can lead to frustrating experiences. 
Free tier users have had limited or no options for recovering accidentally deleted content, projects in personal namespaces haven't had the same protections as those in groups, and group namespace paths have remained locked after deletion, preventing immediate reuse.\n\nWe've heard your feedback, and we're addressing these pain points with a comprehensive redesign of our deletion flow that will be rolled out in multiple iterations.\n\n## What has changed already\n\nOver the past quarter, we have implemented fundamental improvements to create a consistent deletion experience across all pricing tiers. These changes have eliminated the frustration of accidentally deleting important content with no recovery option.\n\n* [**Pending deletion for all users**](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/#deletion-protection-available-for-all-users)**:** All deleted projects and groups now enter a \"pending deletion\" state before being permanently deleted, regardless of their pricing tier.  \n* [**Self-service recovery**](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/#delayed-project-deletion-for-user-namespaces)**:** You can now restore your own content without contacting support, giving you more control and autonomy over your data.  \n* [**Clear status indicators**](https://gitlab.com/gitlab-org/gitlab/-/issues/502234)**:** We have standardized how deletion status is displayed across the platform, making it immediately clear when content is pending deletion.  \n* **Extended recovery window:** On July 10, 2025, we increased the pending deletion period from 7 to 30 days on GitLab.com. This means you now have ample time to recover from accidental deletions.\n\n## What's coming next\n\n### Currently in development\n\nBuilding on the foundation established in our first iteration, we are further enhancing your deletion experience with two key improvements:\n\n* [**Admin area consistency**](https://gitlab.com/groups/gitlab-org/-/epics/17372)**:** Deletions initiated from the Admin area will follow the same pending deletion process as deletions initiated directly from the group or project level, creating a unified experience across all access points.  \n* [**Immediate path reuse**](https://gitlab.com/gitlab-org/gitlab/-/issues/526081)**:** When you delete a project or group, its namespace path will be automatically renamed, allowing you to immediately reuse the original path for new content. This will remove the waiting period currently required to reuse namespace paths.\n\n### Planned for future release\n\nThe final phase will introduce a redesigned deletion experience that completes our vision for a modern, intuitive deletion system:\n\n* **Centralized \"Trash\" interface:** All your deleted content will be accessible in a dedicated \"Trash\" section, providing a familiar paradigm similar to what you're used to in other applications.  \n* [**Clear action separation**](https://gitlab.com/gitlab-org/gitlab/-/issues/541182)**:** We will create a clear distinction between \"Delete\" (temporary, recoverable) and \"Delete Permanently\" (irrevocable) actions to prevent accidental data loss.  
\n* **Bulk management:** You'll be able to restore or permanently delete multiple items at once, making cleanup and recovery more efficient.\n\n## How these changes benefit you\n\nThese enhancements deliver several key benefits that will transform your experience with GitLab's deletion functionality.\n\n* **Protection against data loss** is provided through pending deletion and self-service recovery available across all tiers, giving you a safety net against accidental deletions. The **consistent experience** ensures the same deletion flow applies to all projects and groups, eliminating inconsistencies across the platform.\n\n* You'll gain **greater control** through enhanced visibility and management options for deleted content, with a familiar interface that makes recovery intuitive. **Improved workflow** efficiency will result from immediate path reuse and bulk management capabilities that streamline your content organization process.\n\n* Most importantly, you'll have **peace of mind** knowing that the extended 30-day recovery window ensures ample opportunity to recover important data, while the clear separation between temporary and permanent deletion actions prevents accidental data loss.\n\n## Your feedback matters\n\nAs always, we value your input. Please leave feedback in [the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/538165).",[812],{"featured":6,"template":844,"slug":923},"improving-gitlab-deletion-flow-what-to-expect-in-coming-months",{"content":925,"config":933},{"title":926,"description":927,"authors":928,"heroImage":918,"date":930,"body":931,"category":748,"tags":932},"GitLab rotating Omnibus Linux package signing key","Learn who is impacted by the rotation of the GNU Privacy Guard (GPG) key and what you need to know.",[929],"GitLab","2025-04-16","As part of our standard security practices, GitLab is rotating the GNU Privacy Guard (GPG) key used to sign all Omnibus Linux packages on April 16, 2025. This key ensures the integrity of our packages, verifying that they have not been tampered with after creation in our CI pipelines. This key is distinct from the repository metadata signing key used by package managers and the GPG signing key for the GitLab Runner. GitLab is revoking the existing key and will begin signing upcoming packages using a new key with fingerprint `98BF DB87 FCF1 0076 416C 1E0B AD99 7ACC 82DD 593D`.\n\n**What do I need to do?**\n\nIf you currently validate the GPG signatures of GitLab Omnibus packages, you will need to update your copy of the package signing key. Packages published before this article will remain signed with the previous key.\n\nThe package signing key is separate from the repository metadata signing key used by your operating system’s package managers (like `apt` or `yum`). 
Unless you are specifically verifying package signatures or have configured your package manager to verify the package signatures, no action is required to continue installing GitLab Omnibus packages.\n\n**Where can I find the new key?**\n\nThe new key can be downloaded from `packages.gitlab.com` using the URL:\n\n[https://packages.gitlab.com/gitlab/gitlab-ee/gpgkey/gitlab-gitlab-ee-CB947AD886C8E8FD.pub.gpg](https://packages.gitlab.com/gitlab/gitlab-ee/gpgkey/gitlab-gitlab-ee-CB947AD886C8E8FD.pub.gpg)\n\nPlease check the documentation for more information concerning [verification of the package signatures](https://docs.gitlab.com/omnibus/update/package_signatures#package-signatures).\n\n**What do I do if I still have problems?**\n\nPlease open an issue in the [omnibus-gitlab issue tracker](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/new?issue&issuable_template=Bug).",[542,812,822],{"slug":934,"featured":6,"template":844},"gitlab-rotating-omnibus-linux-package-signing-key",{"content":936,"config":945},{"title":937,"description":938,"authors":939,"heroImage":941,"date":942,"body":943,"category":748,"tags":944},"Prepare now: Docker Hub rate limits will impact GitLab CI/CD","Learn how Docker Hub's upcoming pull rate limits will affect GitLab pipelines and what you can do to avoid disruptions.",[940],"Tim Rizzi","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662488/Blog/Hero%20Images/blog-image-template-1800x945__3_.png","2025-03-24","On April 1, 2025, Docker will implement new [pull rate\nlimits](https://docs.docker.com/docker-hub/usage/) on Docker Hub that may\nsignificantly impact CI/CD pipelines across the industry, including those\nrunning on GitLab. The most significant change is the 100 pulls-per-6-hours\nlimit for unauthenticated users.\n\n\n## What's changing?\n\n\nStarting April 1, Docker will enforce the following pull rate limits:\n\n\n| User type | Pull rate limit | Number of public repositories | Number of private repositories |\n|-----------|----------------|--------------|------------------|\n| Business, Team, Pro (authenticated) | Unlimited (fair use) | Unlimited | Unlimited |\n| Personal (authenticated) | 200 per 6-hour window | Unlimited | Up to 1 |\n| Unauthenticated users | 100 per 6-hour window per IPv4 address or IPv6 /64 subnet | Not applicable | Not applicable |\n\n\n\u003Cp>\u003C/p>\n\nThis is particularly important because:\n\n\n* GitLab's Dependency Proxy currently pulls from Docker Hub as an\nunauthenticated user.\n\n* Most CI/CD pipelines that don't use the Dependency Proxy pull directly\nfrom Docker Hub as unauthenticated users.\n\n* On hosted runners for GitLab.com, multiple users might share the same IP\naddress or subnet, making them collectively subject to this limit.\n\n\n## How this impacts GitLab users\n\n\n**Impact on direct Docker Hub pulls**\n\n\nIf your CI/CD pipelines directly pull images from Docker Hub without\nauthentication, they will be limited to 100 pulls per six-hour window per IP\naddress. For pipelines that run frequently or across multiple projects\nsharing the same runner infrastructure, this will quickly exhaust the limit\nand cause pipeline failures.\n\n\n**Impact on GitLab Dependency Proxy**\n\n\nThe GitLab Dependency Proxy feature allows you to cache Docker images within\nGitLab to speed up pipelines and reduce external dependencies. 
However, the\ncurrent implementation pulls from Docker Hub as an unauthenticated user,\nmeaning it will also be subject to the 100 pulls-per-6-hours limit.\n\n\n**Impact on hosted runners**\n\n\nFor hosted runners on GitLab.com, we use [Google Cloud's pull-through\ncache](https://cloud.google.com/artifact-registry/docs/pull-cached-dockerhub-images).\nThis mirrors the commonly pulled images and allows us to avoid rate limits.\nJob images defined as `image:` or `services:` in your `.gitlab-ci.yml` file,\nare not affected by rate limits.\n\n\nThings are slightly more challenging whenever images are pulled within the\nrunner environment. The most common use case to pull images during runner\nruntime is to build an image using Docker-in-Docker or Kaniko. In this\nscenario, the Docker Hub image defined in your `Dockerfile` is pulled\ndirectly from Docker Hub and is likely to be affected by rate limits.\n\n\n## How GitLab is responding\n\n\nWe're actively working on solutions to mitigate these challenges:\n\n\n* **Dependency Proxy authentication:** We've added support for Docker Hub\nauthentication in the [GitLab Dependency Proxy\nfeature](https://gitlab.com/gitlab-org/gitlab/-/issues/331741). This will\nallow the Dependency Proxy to pull images from Docker Hub as an\nauthenticated user, significantly increasing the rate limits.\n\n* **Documentation updates:** We've updated our\n[documentation](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials)\nto provide clear guidance on configuring pipeline authentication for Docker\nHub.\n\n* **Internal infrastructure preparation:** We're preparing our internal\ninfrastructure to minimize the impact on hosted runners for GitLab.com.\n\n\n## How you can prepare\n\n\n**Option 1: Configure Docker Hub authentication in your pipelines**\n\n\nFor pipelines that pull directly from Docker Hub, you can configure\nauthentication to increase your rate limit to 200 pulls per six-hour window\n(or unlimited with a paid Docker Hub subscription).\n\n\nAdd Docker Hub credentials to your project or group CI/CD variables (not in\nyour `.gitlab-ci.yml` file). Please refer to our [documentation on using\nDocker\nimages](https://docs.gitlab.com/ci/docker/using_docker_images/#use-statically-defined-credentials)\nfor detailed instructions on setting up the `DOCKER_AUTH_CONFIG` CI/CD\nvariable correctly.\n\n\n**Option 2: Use the GitLab Container Registry**\n\n\nConsider pushing your frequently used Docker images to your [GitLab\nContainer\nRegistry](https://docs.gitlab.com/user/packages/container_registry/). This\neliminates the need to pull from Docker Hub during CI/CD runs:\n\n\n1. Pull the image from Docker Hub.\n\n2. Tag it for your GitLab Container Registry.\n\n3. Push it to your GitLab Container Registry.\n\n4. 
Update your pipelines to pull from GitLab Container Registry.\n\n\n```\n\ndocker pull busybox:latest\n\ndocker tag busybox:latest $CI_REGISTRY_IMAGE/busybox:latest\n\ndocker push $CI_REGISTRY_IMAGE/busybox:latest\n\n```\n\n\nThen in your `.gitlab-ci.yml`:\n\n\n`image: $CI_REGISTRY_IMAGE/busybox:latest`\n\n\n**Option 3: Use GitLab Dependency Proxy**\n\n\nGitLab's Dependency Proxy feature provides a way to cache and proxy Docker\nimages, reducing external dependencies and rate limit issues.\n\n\nCurrent authentication options:\n\n* GitLab 17.10: Configure Docker Hub authentication for the Dependency Proxy\nusing [GraphQL\nAPI](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials-using-the-graphql-api)\n\n* GitLab 17.11: Use the new UI-based configuration in your group's settings\n(already available on GitLab.com)\n\n\nOnce authentication is properly configured, you can:\n\n\n1. Configure Docker Hub credentials in your group's Dependency Proxy\nsettings:\n  - For GitLab 17.11+ (or current GitLab.com): Navigate to your group's settings > Packages & Registries > Dependency Proxy.\n  - For GitLab 17.10: Use the GraphQL API to configure authentication.\n2. Update your pipelines to use the Dependency Proxy URLs in your CI/CD\nconfiguration:\n\n`image: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/busybox:latest`\n\n\n**Option 4: Consider a Docker Hub paid subscription**\n\n\nFor organizations with heavy Docker Hub usage, upgrading to a paid Docker\nsubscription (Team or Business) will provide unlimited pulls, which may be\nthe most straightforward solution.\n\n\n## Best practices to reduce Docker Hub rate limit impact\n\n\nRegardless of which option you choose, consider these best practices to\nminimize Docker Hub rate limit impact:\n\n\n* Use specific image tags instead of `latest` to avoid unnecessary pulls.\n\n* Consolidate your Docker files to use the same base images across projects.\n\n* Schedule less critical pipelines to run outside of peak hours.\n\n* Use caching effectively to avoid pulling the same images repeatedly.\n\n\n**Note:** According to Docker Hub\n[documentation](https://docs.docker.com/docker-hub/usage/pulls/#pull-definition),\nthe pull count is incremented when pulling the image manifest, not based on\nimage size or number of layers.\n\n\n## Timeline and next steps\n\n\n**Now**\n  * Implement authentication for direct Docker Hub pulls.\n  * GitLab.com users can already configure Docker Hub authentication for the Dependency Proxy using either:\n    * The GraphQL API, or\n    * The UI in group settings\n  * Self-managed GitLab 17.10 users can configure Dependency Proxy authentication using the GraphQL API.\n\n**April 1, 2025**\n  * Docker Hub rate limits go into effect.\n\n**April 17, 2025**\n  * GitLab 17.11 will be released with UI-based Dependency Proxy authentication support for self-managed instances. \n\nWe recommend taking action well before the April 1 deadline to avoid\nunexpected pipeline failures. For most users, configuring the Dependency\nProxy with Docker Hub authentication is the most efficient long-term\nsolution.\n\n\n> Have questions or need implementation help? 
Please visit [this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/526605) where our team\nis actively providing support.\n",[109,791,542],{"slug":946,"featured":91,"template":844},"prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd",{"category":755,"slug":759,"posts":948},[949,964,975],{"content":950,"config":962},{"title":951,"description":952,"authors":953,"heroImage":955,"date":956,"body":957,"category":759,"tags":958},"The Co-Create Program: How customers are collaborating to build GitLab","Learn how organizations like Thales, Scania, and Kitware are partnering with GitLab engineers to contribute meaningful features that benefit the entire community.",[954],"Fatima Sarah Khalid","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","2025-01-30","This past year, over 800 community members have made more than 3,000 contributions to GitLab. These contributors include team members from global organizations like Thales, Scania, and Kitware, who are helping shape GitLab's future through the [Co-Create Program](https://about.gitlab.com/community/co-create/) — GitLab's collaborative development program where customers work directly with GitLab engineers to contribute meaningful features to the platform.\n\nThrough workshops, pair programming sessions, and ongoing support, program participants get hands-on experience with GitLab's architecture and codebase while solving issues or improving existing features.\n\n\"Our experience with the Co-Create Program has been incredible,\" explains Sébastien Lejeune, open source advocate at Thales. \"It only took two months between discussing our contribution with a GitLab Contributor Success Engineer and getting it live in the GitLab release.\"\n\nIn this post, we'll explore how customers have leveraged the Co-Create Program to turn their ideas into code, learning and contributing along the way.\n\n## The Co-Create experience\n[The GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) helps contributors get started developing on GitLab. \"The advice I would give new contributors is to remember that you can't break anything with the GDK,\" says Hook. \"If you make a change and it doesn't work, you can undo it or start again. The beauty of GDK is that you can tinker, test, and learn without worrying about the environment.\"\n\nEach participating organization in the Co-Create Program receives support throughout their contribution journey:\n\n- __Technical onboarding workshop__: A dedicated session to set up the GitLab Development Kit (GDK) and understand GitLab's architecture\n- __1:1 engineering support__: Access to GitLab engineers for pair programming and technical guidance\n- __Architecture deep dives__: Focused sessions on specific GitLab components relevant to the issue the organization is contributing to\n- __Code review support__: Detailed feedback and guidance through the merge request process\n- __Regular check-ins__: Ongoing collaboration to ensure progress and address any challenges\n\nThis structure ensures that teams can contribute effectively, regardless of their prior experience with GitLab's codebase or the Ruby/Go programming language. As John Parent from Kitware notes, \"If you've never seen or worked with GitLab before, you're staring at a sophisticated architecture and so much code across different projects. 
The Co-Create Program helps distill what would take weeks of internal training into a targeted crash course.\"\n\nThe result is a program that not only helps deliver new features but also builds lasting relationships between GitLab and its user community. \"It's inspiring for our engineers to see the passion our customers bring to contributing to and building GitLab together,\" shares Shekhar Patnaik, principal engineer at GitLab. \"Customers get to see the 'GitLab way,' and engineers get to witness their commitment to shaping the future of GitLab.\"\n\n## Enhancing project UX with Thales\nWhen Thales identified opportunities to improve GitLab's empty project UI, they didn't just file a feature request — they built the solution themselves. Their contributions focused on streamlining the new project setup experience by simplifying SSH/HTTPS configuration with a tabbed interface and adding copy/paste functionality for the code snippets. These changes had a significant impact on developer workflows.\n\nThe team's impact extended beyond the UX improvements. Quentin Michaud, PhD fellow for cloud applications on the edge at Thales, contributed to improving the GitLab Development Kit (GDK). As a package maintainer for Arch Linux, Michaud's expertise helped improve GDK's documentation and support its containerization efforts, making it easier for future contributors to get started.\n\n\"My open source experience helped me troubleshoot GDK's support for Linux distros,” says Michaud. “While improving package versioning documentation, I saw that GitLab's Contributor Success team was also working to set up GDK into a container. Seeing our efforts converge was a great moment for me — it showed how open source collaboration can help build better solutions.\"\n\nThe positive experience for the Thales team means that Lejeune now uses the Co-Create Program as \"a powerful example to show our managers the return on investment from open source contributions.\"\n\n## Advancing package support with Scania\nWhen Scania needed advanced package support in GitLab, they saw an opportunity to contribute and build it themselves. \n\n\"As long-time GitLab users who actively promote open source within our organization, the Co-Create Program gave us a meaningful way to contribute directly to open source,\" shares Puttaraju Venugopal Hassan, solution architect at Scania.\n\nThe team started with smaller changes to familiarize themselves with the codebase and review process, then progressed to larger features. \"One of the most rewarding aspects of the Co-Create Program has been looking back at the full, end-to-end process and seeing how far we've come,\" reflects Océane Legrand, software developer at Scania. \"We started with discovery and smaller changes, but we took on larger tasks over time. It's great to see that progression.\" \n\nTheir contributions include bug fixes for the package registry and efforts to enhance the Conan package registry feature set, bringing it closer to general availability (GA) readiness while implementing Conan version 2 support. Their work and collaboration with GitLab demonstrates how the Co-Create Program can drive significant improvements to GitLab’s package registry capabilities.\n\n\"From the start, our experience with the Co-Create Program was very organized. We had training sessions that guided us through everything we needed to contribute. 
One-on-one sessions with a GitLab engineer also gave us an in-depth look at GitLab’s package architecture, which made the contribution process much smoother,\" said Juan Pablo Gonzalez, software developer at Scania. \n\nThe impact of the program goes beyond code — program participants are also building valuable skills as a direct result of their contributions. In [the GitLab 17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/#mvp), both Legrand and Gonzalez were recognized as GitLab MVPs. Legrand talked about how the work she's doing in open source impacts both GitLab and Scania, including building new skills for her and her team: \"Contributing through the Co-Create Program has given me new skills, like experience with Ruby and background migrations. When my team at Scania faced an issue during an upgrade, I was able to help troubleshoot because I'd already encountered it through the Co-Create Program.\"\n\n## Optimizing authentication for high-performance computing with Kitware\nKitware brought specialized expertise from their work with national laboratories to improve GitLab's authentication framework. Their contributions included adding support for the OAuth2 device authorization grant flow in GitLab, as well as implementing new database tables, controllers, views, and documentation. This contribution enhances GitLab's authentication options, making it more versatile for devices without browsers or with limited input capabilities.\n\n\"The Co-Create Program is the most efficient and effective way to contribute to GitLab as an external contributor,\" shares John Parent, R&D engineer at Kitware. \"Through developer pairing sessions, we found better implementations that we might have missed working alone.\"\n\nAs a long-time open source contributor, Kitware particularly appreciated GitLab's approach to development. \"I assumed GitLab wouldn't rely on out-of-the-box solutions at its scale, but seeing them incorporate a Ruby dependency instead of building a custom in-house solution was great,” says Parent. “Coming from the C++ world, where package managers are rare, it was refreshing to see this approach and how straightforward it could be.\"\n\n## Building better together: Benefits of Co-Create\nThe Co-Create Program creates value that flows both ways. \"The program bridges a gap between us as GitLab engineers and our customers,\" explains Imre Farkas, staff backend engineer at GitLab. \"As we work with them, we hear their day-to-day challenges, the parts of GitLab they rely on, and where improvements can be made. It's great to see how enthusiastic they are about getting involved in building GitLab with us.\"\n\nThis collaborative approach also accelerates GitLab's development. As Shekhar Patnaik, principal engineer at GitLab, observes: \"Through Co-Create, our customers are helping us move our roadmap forward. Their contributions allow us to deliver critical features faster, benefitting our entire user base. As the program scales, there's a real potential to accelerate development on our most impactful features by working alongside the very people who rely on them.\"\n\n## Get started with Co-Create\nReady to turn your feature requests into reality? 
Whether you're looking to enhance GitLab's UI like Thales, improve package support like Scania, or optimize authentication like Kitware, the Co-Create Program welcomes organizations who want to actively shape GitLab's future while building valuable open source experience.\n\nContact your GitLab representative to learn more about participating in the Co-Create Program, or visit our [Co-Create page](https://about.gitlab.com/community/co-create/) for more information.\n",[959,960,961],"contributors","open source","customers",{"slug":963,"featured":91,"template":844},"the-co-create-program-how-customers-are-collaborating-to-build-gitlab",{"content":965,"config":973},{"title":966,"description":967,"authors":968,"heroImage":955,"date":970,"body":971,"category":759,"tags":972},"Kingfisher transforming the developer experience with GitLab","Learn how the international company focuses on DevSecOps, including automation, to reduce complexity in workflows for better efficiency.",[969],"Sharon Gaudin","2024-11-12","Kingfisher plc, an international home improvement company, has leaned into GitLab’s end-to-end platform to help it build a DevSecOps foundation that is revolutionizing its developer experience. And the company plans to continue that improvement by increasing its use of platform features, focusing on security, simplifying its toolchain, and increasing the use of automation.\n\n> \u003Cimg align=\"left\" width=\"200\" height=\"200\" hspace=\"5\" vspace=\"5\" alt=\"Chintan Parmar\" src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176076/Blog/ro7u8p695zw9fllbk4j5.png\" style=\"float: left; margin-right: 25px;\"> “The whole point of this is to reduce friction for our engineers, taking away a lot of the complexity in their workflow, and bringing in best practices and governance,” says Chintan Parmar, site reliability engineering manager at Kingfisher. “In terms of what we've done and what we're doing at the moment, it really is about building a foundation in terms of CI/CD and changing the way we deploy to bring in consistency and improve the developer experience.”\n\nParmar talked about his team and their efforts during the [GitLab DevSecOps World Tour event](https://about.gitlab.com/events/epic-conference/) in London last month. In an on-stage interview with Sherrod Patching, vice president of Customer Success Management at GitLab, he laid out Kingfisher’s journey with the platform, which is enabling its teams, while also making it easier and faster to move software updates and new projects from ideation to deployment.\n\n[Kingfisher](https://www.kingfisher.com/en/index.html) is a parent company with more than 2,000 stores in eight countries across Europe. Listed on the London Stock Exchange and part of the Financial Times Stock Exchange (FTSE) 100 Index, the group reported £13 billion in total revenue in FY 2023/24. Its brands include B&Q, Screwfix, Castorama, and Brico Depot.\n\nThe company first adopted GitLab in 2016, using a free starter license, and then moved to Premium in 2020. In that time, it also has moved from on-premise to a cloud environment, started using shared GitLab runners and source code management, and began building out a CI/CD library that gives team members easy access to standardized and reusable components for typical pipeline stages, such as build, deploy, and test.\n\n## Tracking metrics that execs care about\n\nKingfisher also is tracking metrics, like deployment frequency, lead time to change, and change failure rates, with GitLab. 
And teams are analyzing value streams, mapping workflows, and finding bottlenecks. All of those metrics are being translated into data that company leaders can sink their teeth into.\n\n“Execs may not care about whether a merge request has been waiting 15 or 20 minutes, but they do care about how we translate that time value into dollars or pounds,” says Parmar, who used GitLab when he previously worked at [Dunelm Group, plc,](https://about.gitlab.com/customers/dunelm/) another major UK-based retailer. “Kingfisher is a very data-driven organization. We are looking to overlay these metrics to see where we can continue to improve our developer experience, eliminating slowdowns and manual tasks, while increasing automation.”\n\nWhile on-stage, Parmar made it clear that all the changes being made are aimed at improving software development and deployment. However, it’s equally paramount to making team members’ jobs easier, giving them more time and autonomy to do the kind of work they enjoy, instead of what can seem like a never-ending stream of repetitive, manual tasks. He noted that the team is so focused on easing workflows and giving engineers more time to be innovative, it has created a “developer experience squad.”\n\n## Putting people first while laying out priorities\n\nSo what’s coming next for Kingfisher and its engineering squads, which have about 600 practitioners?\n\nAccording to Parmar, Kingfisher already has its priorities mapped out. Using GitLab to [move security left](https://about.gitlab.com/solutions/application-security-testing/) is at the top of their list. The group also is focused on continuing to reduce its toolchain, and using automation to increase productivity. And he expects that early in 2025, teams will begin “dabbling” with the artificial intelligence capabilities in [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a suite of AI-powered features in the platform that help increase velocity and solve key pain points across the software development lifecycle. Kingfisher will focus on how that can further increase its efficiency and productivity.\n\nTo get all of this done, Parmar says the first step is to ensure that people come first.\n\n“We’re focused on the hearts and minds of our people... and remembering that people can be attached to how they work through pipelines,” he adds. “People have different ways of building their pipelines. We need to understand what they need, what their workflows look like, and then work with them to find the right solution. After, we’ll go back to them with data that shows the improvements worked. So instead of telling them what they need, we find out what that is, and fix what’s slowing them down. That builds a very good rapport with our engineers.”\n\nChanging how a team creates and deploys software is a journey. 
Parmar suggests that collaboratively taking developers and security teams on that journey, instead of dragging them along, makes a big difference in ease of migration and in easing team members’ user experience.\n\n> Learn [how other GitLab customers use the DevSecOps platform](https://about.gitlab.com/customers/) to gain results for customers.\n",[961,542,573,841],{"slug":974,"featured":91,"template":844},"kingfisher-transforming-the-developer-experience-with-gitlab",{"content":976,"config":986},{"title":977,"description":978,"authors":979,"heroImage":981,"date":982,"body":983,"category":759,"tags":984},"How Indeed transformed its CI platform with GitLab","The world's #1 job site migrated thousands of projects to GitLab CI, boosting productivity and cutting costs. Learn the benefits they realized, including a 79% increase in daily pipelines.",[980],"Carl Myers","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099351/Blog/Hero%20Images/Blog/Hero%20Images/Indeed-blog-cover-image-2_4AgA1DkWLtHwBlFGvMffbC_1750099350771.png","2024-08-27","***Editor's note: From time to time, we invite members of our customer community to contribute to the GitLab Blog. Thanks to Carl Myers, Manager of CI Platforms at Indeed, for sharing your experience with GitLab.***\n\nHere at Indeed, our mission is to help people get jobs. Indeed is the [#1 job site](https://www.indeed.com/about?isid=press_us&ikw=press_us_press%2Freleases%2Faward-winning-actress-viola-davis-to-keynote-indeed-futureworks-2023_textlink_https%3A%2F%2Fwww.indeed.com%2Fabout) in the world with more than 350 million unique visitors every month.\n\nFor Indeed's Engineering Platform teams, we have a slightly different motto: \"We help people to help people get jobs.\" As part of a data-driven engineering culture that has spent the better part of two decades always putting the job seeker first, we are responsible for building the tools that not only make this possible, but empower engineers to deliver positive outcomes to job seekers every day.\n\nGitLab Continuous Integration has allowed Indeed’s CI Platform team of just 11 people to effectively support thousands of users across the company. Other benefits Indeed has realized by moving to GitLab CI include:\n- 79% increase in daily pipelines\n- 10-20% lower CI hardware costs\n- Decreased support burden\n\n## Evolving our CI platform: From Jenkins to a scalable solution\n\nLike many large technology companies, we built our CI platform organically as the company scaled, using the de facto open source and industry standard solutions available at the time. Back in 2007, when Indeed had fewer than 20 engineers, we were using Hudson, Jenkins’ direct predecessor.\n\nToday, through nearly two decades of growth, we have thousands of engineers. As new technology became available, we made incremental improvements, switching to Jenkins around 2011. Another improvement allowed us to move most of our workloads to dynamic cloud worker nodes using [AWS EC2](https://aws.amazon.com/ec2/). As we entered the Kubernetes age, however, the system architecture reached its limits.\n\nJenkins’ architecture was not created with the cloud in mind. Jenkins operates by having a \"controller\" node, a single point of failure that runs critical parts of a pipeline and farms out certain steps to worker nodes (which can scale horizontally to some extent). Controllers are also a manual scaling axis.\n\nIf you have too many jobs to fit on one controller, you must partition your jobs across controllers manually. 
CloudBees offers ways to mitigate this, including the CloudBees Jenkins Operations Center, which allows you to manage your constellation of controllers from a single centralized place. However, controllers remain challenging to run in a Kubernetes environment because each controller is a fragile single point of failure. Activities like node rollouts or hardware failures cause downtime.\n\nIn addition to the technical limitations baked into Jenkins itself, our CI platform also had several problems of our own making. For example, we used the Groovy Jenkins DSL to generate jobs from code in each repository. This led to each project having its own copy-pasted job pipeline, resulting in hundreds of versions that were hard to maintain and update. While Indeed’s engineering culture values flexibility and allows teams to operate in separate repositories, this flexibility became a burden as teams spent too much time addressing regular maintenance requests.\n\nRecognizing our technical debt, we turned to the [Golden Path pattern](https://tag-app-delivery.cncf.io/whitepapers/platforms/), which allows flexibility while providing a default route to simplify updates and encourage consistent practices across projects.\n\nThe CI Platform team at Indeed is not very large. Our team of around 11 engineers supports thousands of users, fielding support requests, performing upgrades and maintenance, and enabling always-on support for our global company.\n\nBecause our team not only supports our GitLab instance but also the entire CI platform, including the artifact server, our shared build code, and multiple other custom components of our platform, we had our work cut out for us. We needed a plan that would help us address our challenges while making the most efficient use of our existing resources.\n\n## Moving to GitLab CI\n\nAfter a careful design review with key stakeholders, we decided to migrate the entire company from Jenkins to GitLab CI. The primary reasons for choosing GitLab CI were:\n- We were already using GitLab for source code management.\n- GitLab is a complete offering that provides everything we need for CI.\n- GitLab CI is designed for scalability and the cloud.\n- GitLab CI enables us to write templates that extend other templates, which is compatible with our golden path strategy.\n- GitLab is open source software and the GitLab team has always been supportive in helping us submit fixes, giving us extra flexibility and reassurance.\n\nBy the time we officially announced that the GitLab CI Platform would be generally available to users, we already had 23% of all builds happening in GitLab CI from a combination of grassroots efforts and early adopters.\n\nThe challenge of the migration, however, would be the long tail. Due to the number of custom builds in Jenkins, an automated migration tool would not work for the majority of teams. Most of the benefits of the new system would not come until the old system was at 0%. Only then could we turn off the hardware and save the CloudBees license fee.\n\n## Feature parity and the benefits of starting over\n\nThough we support many different technologies at Indeed, the three most common languages are Java, Python, and JavaScript. These language stacks are used to make libraries, deployables (web services or applications), and cron jobs (a process that runs at regular intervals, for example, to build a data set in our data lake). Each of these formed a matrix of project types (Java Library, Python Cronjob, JavaScript Webapp, etc.) 
for which we had a skeleton in Jenkins. Therefore, we had to produce a golden path template in GitLab CI for each of these project types.\n\nMost users could use these recommended paths without change, but for those who did require customization, the golden path would still be a valuable starting point and enable them to change only what they needed, while still benefiting from centralized template updates in the future.\n\nWe quickly realized that most users, even those with customizations, were happy to take the golden path and at least try it. If they missed their customizations, they could always add them later. This was a surprising result! We thought that teams who had invested in significant customization would be loath to give them up, but in the majority of cases teams just didn't care about them anymore. This allowed us to migrate many projects very quickly — we could just drop the golden path (a small file about 6 lines long with includes) into their project, and they could take it from there.\n\n## InnerSource to the rescue\n\nThe CI Platform team also adopted a policy of \"external contributions first\" to encourage everyone in the company to participate. This is sometimes called InnerSource. We wrote tests and documentation to enable external contributions — contributions from outside our immediate team — so teams that wanted to write customizations could instead include them in the golden path behind a feature flag. This let them share their work with others and ensure we didn't break them moving forward (because they became part of our codebase, not theirs).\n\nThis also had the benefit that particular teams who were blocked waiting for a feature they needed were empowered to work on the feature themselves. We could say \"we plan to implement the feature in a few weeks, but if you need it earlier than that we are happy to accept a contribution.\" In the end, many core features necessary for parity were developed in this manner, more quickly and better than our team had resources to do it. The migration would not have been a success without this model.\n\n## Ahead of schedule and under budget\n\nOur CloudBees license expired on April 1, 2024. This gave us an aggressive target to achieve the full migration. This was particularly ambitious considering that at the time, 80% of all builds (60% of all projects) still used Jenkins for their CI. This meant over 2,000 [Jenkinsfiles](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) would still need to be rewritten or replaced with our golden path templates.\n\nTo achieve this target, we made documentation and examples available, implemented features where possible, and helped our users contribute features where they were able.\n\nWe started regular office hours, where anyone could come and ask questions or seek our help to migrate. We additionally prioritized support questions relating to migration ahead of almost everything else. Our team became GitLab CI experts and shared that expertise inside our team and across the organization.\n\nAutomatic migration for most projects was not possible, but we discovered it could work for a small subset of projects where customization was rare. We created a Sourcegraph batch change campaign to submit merge requests to migrate hundreds of projects, and poked and prodded our users to accept these MRs.\n\nWe took success stories from our users and shared them widely. 
As users contributed new features to our golden paths, we advertised that these features \"came free\" when you migrated to GitLab CI. Some examples included built-in security and compliance scanning, Slack notifications for CI builds, and integrations with other internal systems.\n\nWe also conducted a campaign of aggressive \"scream tests.\" We automatically disabled Jenkins jobs that hadn't run or succeeded in a while, and told users that if they needed them, they could turn them back on. This was a low-friction way to identify which jobs were actually needed. We had thousands of jobs that hadn't been run a single time since our last CI migration (which was Jenkins to Jenkins). This told us we could safely ignore almost all of them.\n\nIn January 2024, we nudged our users by announcing that all Jenkins controllers would become read-only (no builds) unless an exception was explicitly requested. We had much better ownership information for controllers and they generally aligned with our organization's structure, so it made sense to focus on controllers rather than jobs. The list of controllers was also a much more manageable list than the list of jobs.\n\nTo obtain an exception, we asked our users to find their controllers in a spreadsheet and put their contact information next to each one. This enabled us to get a guaranteed up-to-date list of stakeholders we could follow up with as we sprinted to the finish line, but also enabled users to clearly let us know which jobs they absolutely needed. At peak, we had about 400 controllers; by January we had 220, but only 54 controllers required exceptions (several of them owned by us, to run our tests and canaries).\n\n![Indeed - Jenkins Controller Count graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099357/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099357392.png)\n\nWe had a manageable list of around 50 teams we divided among our team and started doing outreach to understand how each team was progressing with the migration. We spent January and February discovering that some teams planned to finish their migration without our help before February 28 others were planning to deprecate their projects before then, and a very small number were very worried they wouldn't make it.\n\nWe were able to work with this smaller set of teams and provide them with “white-glove” service. We still explained that while we lacked the expertise necessary to do the migration for them, we could partner with a subject matter expert from their team. For some projects, we wrote and they reviewed; for others, they wrote and we reviewed. In the end, all of our work paid off and we turned off Jenkins on the very day we had announced 8 months earlier.\n\n## The results: Enhanced CI efficiency and user satisfaction\n\nAt its peak, our Jenkins CI platform ran over 14,000 pipelines per day and serviced our thousands of projects. Today, our GitLab CI platform has run over 40,000 pipelines in a single day and regularly runs over 25,000 per day. The incremental cost of each job of each pipeline is similar to Jenkins, but without the overhead of hardware to run the controllers. Additionally, these controllers served as single points of failure and scaling limiters that forced us to artificially divide our platform into segments. While an apples-to-apples comparison is difficult, we find that with this overhead gone our CI hardware costs are 10-20% lower. 
Additionally, the support burden of GitLab CI is lower since the application automatically scales in the cloud, has cross-availability-zone resiliency, and the templating language has excellent public documentation available.\n\nA benefit just as important, if not moreso, is that now we are at over 70% adoption of our golden paths. This means that we can roll out an improvement and over 5,000 projects at Indeed will benefit immediately with no action required on their part. This has enabled us to move some jobs to more cost-effective ARM64 instances, keep users' build images updated more easily, and better manage other cost saving opportunities. Most importantly, our users are happier with the new platform.\n\n__About the author:__\n*Carl Myers lives in Sacramento, CA, and is the manager of the CI Platform team at Indeed. Carl has spent his nearly two-decade career dedicated to building internal tools and developer platforms that delight and empower engineers at companies large and small.*\n\n**Acknowledgements:**\n*This migration would not have been possible without the tireless efforts of Tron Nedelea, Eddie Huang, Vivek Nynaru, Carlos Gonzalez, Lane Van Elderen, and the rest of the CI Platform team. The team also especially appreciates the leadership of Deepak Bitragunta, and Irina Tyree for helping secure buy-in, resources and company wide alignment throughout this long project. Finally, our thanks go out to everyone across Indeed who contributed code, feedback, bug reports, and helped migrate projects.*\n\n**This is an edited version of the article [How Indeed Replaced Its CI Platform with Gitlab CI](https://engineering.indeedblog.com/blog/2024/08/indeed-gitlab-ci-migration/), originally published on the Indeed engineering blog.**",[961,109,985,542],"user stories",{"slug":987,"featured":91,"template":844},"how-indeed-transformed-its-ci-platform-with-gitlab",{"category":573,"slug":576,"posts":989},[990,1002,1016],{"content":991,"config":1000},{"title":992,"description":993,"authors":994,"heroImage":996,"date":997,"body":998,"category":576,"tags":999},"Atlassian ending Data Center as GitLab maintains deployment choice","As Atlassian transitions Data Center customers to cloud-only, GitLab presents a menu of deployment choices that map to business needs.",[995],"Emilio Salvador","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098354/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_5XrohmuWBNuqL89BxVUzWm_1750098354056.png","2025-10-07","Change is never easy, especially when it's not your choice. Atlassian's announcement that [all Data Center products will reach end-of-life by March 28, 2029](https://www.atlassian.com/blog/announcements/atlassian-ascend), means thousands of organizations must now reconsider their DevSecOps deployment and infrastructure. But you don't have to settle for deployment options that don't fit your needs. GitLab maintains your freedom to choose — whether you need self-managed for compliance, cloud for convenience, or hybrid for flexibility — all within a single AI-powered DevSecOps platform that respects your requirements.\n\nWhile other vendors force migrations to cloud-only architectures, GitLab remains committed to supporting the deployment choices that match your business needs. 
Whether you're managing sensitive government data, operating in air-gapped environments, or simply prefer the control of self-managed deployments, we understand that one size doesn't fit all.\n\n## The cloud isn't the answer for everyone\n\nFor the many companies that invested millions of dollars in Data Center deployments, including those that migrated to Data Center [after its Server products were discontinued](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/), this announcement represents more than a product sunset. It signals a fundamental shift away from customer-centric architecture choices, forcing enterprises into difficult positions: accept a deployment model that doesn't fit their needs, or find a vendor that respects their requirements.\n\nMany of the organizations requiring self-managed deployments represent some of the world's most important organizations: healthcare systems protecting patient data, financial institutions managing trillions in assets, government agencies safeguarding national security, and defense contractors operating in air-gapped environments.\n\nThese organizations don't choose self-managed deployments for convenience; they choose them for compliance, security, and sovereignty requirements that cloud-only architectures simply cannot meet. Organizations operating in closed environments with restricted or no internet access aren't exceptions — they represent a significant portion of enterprise customers across various industries.\n\n![GitLab vs. Atlassian comparison table](https://res.cloudinary.com/about-gitlab-com/image/upload/v1759928476/ynl7wwmkh5xyqhszv46m.jpg)\n\n## The real cost of forced cloud migration goes beyond dollars\n\nWhile cloud-only vendors frame mandatory migrations as \"upgrades,\" organizations face substantial challenges beyond simple financial costs:\n\n* **Lost integration capabilities:** Years of custom integrations with legacy systems, carefully crafted workflows, and enterprise-specific automations become obsolete. Organizations with deep integrations to legacy systems often find cloud migration technically infeasible.\n\n* **Regulatory constraints:** For organizations in regulated industries, cloud migration isn't just complex — it's often not permitted. Data residency requirements, air-gapped environments, and strict regulatory frameworks don't bend to vendor preferences. The absence of single-tenant solutions in many cloud-only approaches creates insurmountable compliance barriers.\n\n* **Productivity impacts:** Cloud-only architectures often require juggling multiple products: separate tools for planning, code management, CI/CD, and documentation. Each tool means another context switch, another integration to maintain, another potential point of failure. GitLab research shows [30% of developers spend at least 50% of their job maintaining and/or integrating their DevSecOps toolchain](https://about.gitlab.com/developer-survey/). Fragmented architectures exacerbate this challenge rather than solving it.\n\n## GitLab offers choice, commitment, and consolidation\n\nEnterprise customers deserve a trustworthy technology partner. That's why we've committed to supporting a range of deployment options — whether you need on-premises for compliance, hybrid for flexibility, or cloud for convenience, the choice remains yours. 
That commitment continues with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI solution that supports developers at every stage of their workflow.\n\nBut we offer more than just deployment flexibility. While other vendors might force you to cobble together their products into a fragmented toolchain, GitLab provides everything in a **comprehensive AI-native DevSecOps platform**. Source code management, CI/CD, security scanning, Agile planning, and documentation are all managed within a single application and a single vendor relationship.\n\nThis isn't theoretical. When [Airbus](https://about.gitlab.com/customers/airbus/) and [Iron Mountain](https://about.gitlab.com/customers/iron-mountain/) evaluated their existing fragmented toolchains, they consistently identified challenges: poor user experience, missing functionalities like built-in security scanning and review apps, and management complexity from plugin troubleshooting. **These aren't minor challenges; they're major blockers for modern software delivery.**\n\n## Your migration path: Simpler than you think\n\nWe've helped thousands of organizations migrate from other vendors, and we've built the tools and expertise to make your transition smooth:\n\n* **Automated migration tools:** Our [Bitbucket Server importer](https://docs.gitlab.com/user/project/import/bitbucket_server/) brings over repositories, pull requests, comments, and even Large File Storage (LFS) objects. For Jira, our [built-in importer](https://docs.gitlab.com/user/project/import/jira/) handles issues, descriptions, and labels, with professional services available for complex migrations.\n\n* **Proven at scale:** A 500 GiB repository with 13,000 pull requests, 10,000 branches, and 7,000 tags is likely to [take just 8 hours to migrate](https://docs.gitlab.com/user/project/import/bitbucket_server/) from Bitbucket to GitLab using parallel processing.\n\n* **Immediate ROI:** A [Forrester Consulting Total Economic Impact™ study commissioned by GitLab](https://about.gitlab.com/resources/study-forrester-tei-gitlab-ultimate/) found that investing in GitLab Ultimate translates to real bottom-line impact, with a three-year 483% ROI, 5x time saved in security-related activities, and 25% savings in software toolchain costs.\n\n## Start your journey to a unified DevSecOps platform\n\nForward-thinking organizations aren't waiting for vendor-mandated deadlines. They're evaluating alternatives now, while they have time to migrate thoughtfully to platforms that protect their investments and deliver on promises.\n\nOrganizations invest in self-managed deployments because they need control, compliance, and customization. 
When vendors deprecate these capabilities, they remove not just features but the fundamental ability to choose environments matching business requirements.\n\nModern DevSecOps platforms should offer complete functionality that respects deployment needs, consolidates toolchains, and accelerates software delivery, without forcing compromises on security or data sovereignty.\n\n[Talk to our sales team](https://about.gitlab.com/sales/) today about your migration options, or explore our [comprehensive migration resources](https://about.gitlab.com/move-to-gitlab-from-atlassian/) to see how thousands of organizations have already made the switch.\n\nYou also can [try GitLab Ultimate with GitLab Duo Enterprise](https://about.gitlab.com/free-trial/devsecops/) for free for 30 days to see what a unified DevSecOps platform can do for your organization.",[581,573,812,856],{"featured":91,"template":844,"slug":1001},"atlassian-ending-data-center-as-gitlab-maintains-deployment-choice",{"content":1003,"config":1014},{"title":1004,"description":1005,"authors":1006,"heroImage":1009,"date":1010,"category":576,"tags":1011,"body":1013},"Why financial services choose single-tenant SaaS","Discover how GitLab Dedicated can help financial services organizations achieve compliant DevSecOps without compromising performance.",[1007,1008],"George Kichukov","Allie Holland","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662023/Blog/Hero%20Images/display-dedicated-for-government-article-image-0679-1800x945-fy26.png","2025-08-14",[635,1012],"DevOps platform","Walk into any major financial institution and you'll see the contradiction immediately. Past the armed guards, through the biometric scanners, beyond the reinforced walls and multiple security checkpoints, you'll find developers building the algorithms that power global finance — on shared infrastructure alongside millions of strangers.\n\nThe software powering today's financial institutions is anything but ordinary. It includes credit risk models that protect billions in assets, payment processing algorithms handling millions of transactions, customer intelligence platforms that drive business strategy, and regulatory systems ensuring operational compliance  — all powered by source code that serves as both operational core and strategic asset.\n\n## When shared infrastructure becomes systemic risk\n\nThe rise of software-as-a-service platforms has created an uncomfortable reality for financial institutions. Every shared tenant becomes an unmanaged third-party risk, turning platform-wide incidents into industry-wide disruptions. This is the exact kind of concentration risk drawing increasing attention from regulators.\n\nJPMorgan Chase's Chief Information Security Officer Patrick Opet recently issued a stark warning to the industry in an [open letter](https://www.jpmorgan.com/technology/technology-blog/open-letter-to-our-suppliers) to third-party suppliers. He highlighted how SaaS adoption \"is creating a substantial vulnerability that is weakening the global economic system\" by embedding \"concentration risk into global critical infrastructure.\" The letter emphasizes that \"an attack on one major SaaS or PaaS provider can immediately ripple through its customers,” creating exactly the systemic risk that multi-tenant cloud platforms for source code management, CI builds, CD deployments, and security scanning introduce.\n\nConsider the regulatory complexity this creates. 
In shared environments, your compliance posture becomes hostage to potential incidents impacting other tenants as well as the concentration risks of large attack surface providers. A misconfiguration affecting any organization on the platform can trigger wider impact across the entire ecosystem. \n\nData sovereignty challenges compound this risk. Shared platforms distribute workloads across multiple regions and jurisdictions, often without granular control over where your source code executes. For institutions operating under strict regulatory requirements, this geographic distribution can create compliance gaps that are difficult to remediate.\n\nThen there's the amplification effect. Every shared tenant effectively becomes an indirect third-party risk to your operations. Their vulnerabilities increase your attack surface. Their incidents can impact your availability. Their compromises can affect your environment.\n\n## Purpose-built for what matters most\n\nGitLab recognizes that your source code deserves the same security posture as your most sensitive customer data. Rather than forcing you to choose between cloud-scale efficiency and enterprise-grade security, GitLab delivers both through [GitLab Dedicated](https://about.gitlab.com/dedicated/), purpose-built infrastructure that maintains complete isolation.\n\nYour development workflows, source code [repositories](https://docs.gitlab.com/user/project/repository/), and [CI/CD pipelines](https://docs.gitlab.com/ci/pipelines/) run in an environment exclusively dedicated to your organization. The [hosted runners](https://docs.gitlab.com/administration/dedicated/hosted_runners/) for GitLab Dedicated exemplify this approach. These runners connect securely to your data center through outbound private links, allowing access to your private services without exposing any traffic to the public internet. The [auto-scaling architecture](https://docs.gitlab.com/runner/runner_autoscale/) provides the performance you need, without compromising security or control. \n \n## Rethinking control\n\nFor financial institutions, minimizing shared risk is only part of the equation — true resilience requires precise control over how systems operate, scale, and comply with regulatory frameworks. GitLab Dedicated enables comprehensive data sovereignty through multiple layers of customer control. You maintain complete authority over [encryption keys](https://docs.gitlab.com/administration/dedicated/encryption/#encrypted-data-at-rest) through [bring-your-own-key (BYOK)](https://docs.gitlab.com/administration/dedicated/encryption/#bring-your-own-key-byok) capabilities, ensuring that sensitive source code and configuration data remains accessible only to your organization. Even GitLab cannot access your encrypted data without your keys.\n\n[Data residency](https://docs.gitlab.com/subscriptions/gitlab_dedicated/data_residency_and_high_availability/) becomes a choice rather than a constraint. You select your preferred AWS region to meet regulatory requirements and organizational data governance policies, maintaining full control over where your sensitive source code and intellectual property are stored.\n\nThis control extends to [compliance frameworks](https://docs.gitlab.com/user/compliance/compliance_frameworks/) that financial institutions require. 
The platform provides [comprehensive audit trails](https://docs.gitlab.com/user/compliance/audit_events/) and logging capabilities that support compliance efforts for financial services regulations like [Sarbanes-Oxley](https://about.gitlab.com/compliance/sox-compliance/) and [GLBA Safeguards Rule](https://www.ftc.gov/business-guidance/privacy-security/gramm-leach-bliley-act).\n\nWhen compliance questions arise, you work directly with GitLab's dedicated support team — experienced professionals who understand the regulatory challenges that organizations in highly regulated industries face.\n\n## Operational excellence without operational overhead\n\nGitLab Dedicated maintains [high availability](https://docs.gitlab.com/subscriptions/gitlab_dedicated/data_residency_and_high_availability/) with [built-in disaster recovery](https://docs.gitlab.com/subscriptions/gitlab_dedicated/), ensuring your development operations remain resilient even during infrastructure failures. The dedicated resources scale with your organization's needs without the performance variability that shared environments introduce.\n\nThe [zero-maintenance approach](https://docs.gitlab.com/subscriptions/gitlab_dedicated/maintenance/) to CI/CD infrastructure eliminates a significant operational burden. Your teams focus on development while GitLab manages the underlying infrastructure, auto-scaling, and maintenance — including rapid security patching to protect your critical intellectual property from emerging threats. This operational efficiency doesn't come at the cost of security: the dedicated infrastructure provides enterprise-grade controls while delivering cloud-scale performance.\n\n## The competitive reality\n\nWhile some institutions debate infrastructure strategies, industry leaders are taking decisive action. [NatWest Group](https://about.gitlab.com/press/releases/2022-11-30-gitlab-dedicated-launches-to-meet-complex-compliance-requirements/), one of the UK's largest financial institutions, chose GitLab Dedicated to transform their engineering capabilities:\n\n> *\"NatWest Group is adopting GitLab Dedicated to enable our engineers to use a common cloud engineering platform; delivering new customer outcomes rapidly, frequently and securely with high quality, automated testing, on demand infrastructure and straight-through deployment. This will significantly enhance collaboration, improve developer productivity and unleash creativity via a 'single-pane-of-glass' for software development.\"*\n>\n> **Adam Leggett**, Platform Lead - Engineering Platforms, NatWest\n\n## The strategic choice\n\nThe most successful financial institutions face a unique challenge: They have the most to lose from shared infrastructure risks, but also the resources to architect better solutions. \n\n**The question that separates industry leaders from followers:** Will you accept shared infrastructure risks as the price of digital transformation, or will you invest in infrastructure that treats your source code with the strategic importance it deserves?\n\nYour trading algorithms aren't shared. Your risk models aren't shared. Your customer data isn't shared.\n\n**Why is your development platform shared?**\n\n*Ready to treat your source code like the strategic asset it is? 
[Let’s chat](https://about.gitlab.com/solutions/finance/) about how GitLab Dedicated provides the security, compliance, and performance that financial institutions demand — without the compromises of shared infrastructure.*",{"featured":6,"template":844,"slug":1015},"why-financial-services-choose-single-tenant-saas",{"content":1017,"config":1028},{"title":1018,"description":1019,"authors":1020,"heroImage":1022,"date":1023,"body":1024,"category":576,"tags":1025},"Why now is the time for embedded DevSecOps","Learn how embedded development teams address long feedback cycles, manual compliance, and isolated development with DevSecOps.",[1021],"Matt DeLaney","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","2025-07-01","For embedded systems teams, DevSecOps has traditionally seemed like an approach better suited to SaaS applications than firmware development. But this is changing. Software is now a primary differentiator in hardware products. New market expectations demand modern development practices. In response, organizations are pursuing \"embedded DevSecOps.\"\n\nWhat is embedded DevSecOps? The application of collaborative engineering practices, integrated toolchains, and automation for building, testing, and securing software to embedded systems development. Embedded DevSecOps includes necessary adaptations for hardware integration.\n## Convergence of market forces\nThree powerful market forces are converging to compel embedded teams to modernize their development practices.\n### 1. The software-defined product revolution\nProducts once defined primarily by their hardware are now differentiated by their software capabilities. The software-defined vehicle (SDV) market tells a compelling story in this regard. It's projected to grow from $213.5 billion in 2024 to [$1.24 trillion](https://www.marketsandmarkets.com/Market-Reports/software-defined-vehicles-market-187205966.html) by 2030, a massive 34% compound annual growth rate.\nThe software content in these products is growing considerably. By the end of 2025, the average vehicle is expected to contain [650 million lines of code](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/). Traditional embedded development approaches cannot handle this level of software complexity. \n### 2. Hardware virtualization as a technical enabler\nHardware virtualization is a key technical enabler of embedded DevSecOps. Virtual electronic control units (vECUs), cloud-based ARM CPUs, and sophisticated simulation environments are becoming more prevalent. Virtual hardware allows testing that once required physical hardware.\n\nThese virtualization technologies provide a foundation for continuous integration ([CI](https://about.gitlab.com/topics/ci-cd/)). But their value is fully realized only when integrated into an automated workflow. Combined with collaborative development practices and automated pipelines, virtual testing helps teams detect issues much earlier, when fixes are far less expensive. Without embedded DevSecOps practices and tooling to orchestrate these virtual resources, organizations can't capitalize on the virtualization trend.\n### 3. The competitive and economic reality\nThree interrelated forces are reshaping the competitive landscape for embedded development:\n- The talent war has shifted decisively. 
As an embedded systems leader at a GitLab customer explained, “No embedded engineers graduating from college today know legacy tools like Perforce. They know Git. These young engineers will work at a company for six months on legacy tools, then quit.” Companies using outdated tools may lose their engineering future.\n- This talent advantage translates into competitive superiority. Tech-forward companies that attract top engineers with modern practices achieve remarkable results. For example, in 2024, [SpaceX](https://spacenews.com/spacex-launch-surge-helps-set-new-global-launch-record-in-2024/) performed more orbital launches than the rest of the world combined. Tech-forward companies excel at software development and embrace a modern development culture. This, among other things, creates efficiencies that legacy companies struggle to match. \n- The rising costs of embedded development — driven by long feedback cycles — create an urgent need for embedded DevSecOps. When developers have to wait weeks to test code on hardware test benches, productivity remains inherently low. Engineers lose context and must switch contexts when results arrive. The problem worsens when defects enter the picture. Bugs become more expensive to fix the later they're discovered. Long feedback cycles magnify this problem in embedded systems.\n\nOrganizations are adopting embedded DevSecOps to help combat these challenges.\n## Priority transformation areas\nBased on these market forces, forward-thinking embedded systems leaders are implementing embedded DevSecOps in the following ways. \n### From hardware bottlenecks to continuous testing\nHardware-testing bottlenecks represent one of the most significant constraints in traditional embedded development. These delays create the unfavorable economics described earlier — when developers wait weeks for hardware access, defect costs spiral.\nAddressing this challenge requires a multifaceted approach including: \n* Automating the orchestration of expensive shared hardware test benches among embedded developers  \n* Integrating both SIL (Software-in-the-Loop) and HIL (Hardware-in-the-Loop) testing into automated CI pipelines  \n* Standardizing builds with version-controlled environments\n\nEmbedded developers can accomplish this with GitLab's [On-Premises Device Cloud](https://gitlab.com/gitlab-accelerates-embedded/comp/device-cloud), a CI/CD component. Through automating the orchestration of firmware tests on virtual and real hardware, teams are better positioned to reduce feedback cycles from weeks to hours. They also can catch more bugs early on in the software development lifecycle.\n### Automating compliance and security governance\nEmbedded systems face strict regulatory requirements. Manual compliance processes are unsustainable.\nLeading organizations are transforming how they comply with these requirements by: \n* Replacing manual workflows with automated [compliance frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/)  \n* Integrating specialized functional safety, security, and code quality tools into automated continuous integration pipelines  \n* Automating approval workflows, enforcing code reviews, and maintaining audit trails  \n* Configuring compliance frameworks for specific standards like ISO 26262 or DO-178C\n\nThis approach enables greater compliance maturity without additional headcount — turning what was once a burden into a competitive advantage. 
One leading electric vehicle (EV) manufacturer executes 120,000 CI/CD jobs per day with GitLab, many of which include compliance checks. And they can fix and deploy bug fixes to vehicles within an hour of discovery. This level of scale and speed would be extremely difficult without automated compliance workflows.\n### Enabling collaborative innovation\nHistorically, for valid business and technical reasons, embedded developers have largely worked alone at their desks. Collaboration has been limited. Innovative organizations break down these barriers by enabling shared code visibility through integrated source control and CI/CD workflows. These modern practices attract and retain engineers while unlocking innovation that would remain hidden in isolated workflows.\nAs one director of DevOps at a tech-forward automotive manufacturer (a GitLab customer) explains: \"It's really critical for us to have a single pane of glass that we can look at and see the statuses. The developers, when they bring a merge request, are aware of the status of a given workflow in order to move as fast as possible.\" This transparency accelerates innovation, enabling automakers to rapidly iterate on software features that differentiate their vehicles in an increasingly competitive market.\n## The window of opportunity\nEmbedded systems leaders have a clear window of opportunity to gain a competitive advantage through DevSecOps adoption. But the window won't stay open forever. Software continues to become the primary differentiator in embedded products, and the gap between leaders and laggards will only widen.\nOrganizations that successfully adopt DevSecOps will reduce costs, accelerate time-to-market, and unlock innovation that differentiates them in the market. The embedded systems leaders of tomorrow are the ones embracing DevSecOps today.\n> While this article explored why now is the critical time for embedded teams to adopt DevSecOps, you may be wondering about the practical steps to get started. Learn how to put these concepts into action with our guide: [4 ways to accelerate embedded development with GitLab](https://about.gitlab.com/blog/4-ways-to-accelerate-embedded-development-with-gitlab/).",[1026,812,109,1027],"embedded DevOps","automotive",{"featured":6,"template":844,"slug":1029},"why-now-is-the-time-for-embedded-devsecops",{"category":776,"slug":780,"posts":1031},[1032,1045,1057],{"content":1033,"config":1043},{"title":1034,"description":1035,"authors":1036,"heroImage":1039,"date":1040,"body":1041,"category":780,"tags":1042},"Variable and artifact sharing in GitLab parent-child pipelines","Learn how to simplify complex CI/CD pipelines with these best practices for sharing data in more modular pipeline setups. ",[1037,1038],"William Arias","Daniel Helfand","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664198/Blog/Hero%20Images/Self-Hosted_1800x945.png","2025-10-16","Software projects have different evolving needs and requirements. Some have\nsaid that *software is never finished, merely abandoned*. Some software\nprojects are small and others are large with complex integrations. Some have\ndependencies on external projects, while others are self-contained.\nRegardless of the size and complexity, the need to validate and ensure\nfunctionality remains paramount. \n\n\nCI/CD pipelines can help with the challenge of building and validating software projects consistently, but, much like the software itself, these pipelines can become complex with many dependencies. 
This is where ideas like [parent-child pipelines](https://docs.gitlab.com/ci/pipelines/downstream_pipelines/#parent-child-pipelines) and data exchange in CI/CD setups become incredibly important.\n\nIn this article, we will cover common CI/CD data exchange challenges users may encounter with parent-child pipelines in GitLab — and how to solve them. You'll learn how to turn complex CI/CD processes into more manageable setups. \n\n## Using parent-child pipelines\n\nThe pipeline setup in the image below illustrates a scenario where a project could require a large, complex pipeline. The whole project resides in one repository and contains different modules. Each module requires its own set of build and test automation steps. \n\n\nOne approach to address the CI/CD configuration in a scenario like this is to break down the larger pipeline into smaller ones (i.e., child pipelines) and keep a common CI/CD process that is shared across all modules in charge of the whole orchestration (i.e., parent pipeline).\n\n\n![CI/CD configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617772/hizwvhmgxn6exbmvsnrv.png)\n\n\nThe parent-child pipeline pattern allows a single pipeline to orchestrate one or many downstream pipelines. Similar to how a single pipeline coordinates the execution of multiple [jobs](https://docs.gitlab.com/ci/jobs/), the parent pipeline coordinates the running of full pipelines with one or more jobs.\n\n\nThis pattern has been shown to be helpful in a variety of use cases:\n\n\n* Breaking down large, complex pipelines into smaller, manageable pieces  \n\n* Conditionally executing certain pipelines as part of a larger CI/CD process  \n\n* Executing pipelines in parallel  \n\n* Helping manage user permissions to access and run certain pipelines \n\n\n\nGitLab’s current CI/CD structure supports this pattern and makes it simple to implement parent-child pipelines. While there are many benefits when using the parent-child pipeline pattern with GitLab, one question we often get is how to share data between the parent and child pipelines. In the next sections, we’ll go over how to make use of GitLab variables and artifacts to address this concern.\n\n\n### Sharing variables\n\n\nThere are cases where it is necessary to pass the output from a parent pipeline job to a child pipeline. These outputs can be shared as variables, [artifacts](https://docs.gitlab.com/ci/jobs/job_artifacts/), and [inputs](https://docs.gitlab.com/ci/inputs/).\n\n\nConsider a case where we create a custom variable `var_1` during the runtime of a job:\n\n\n\n```\n\nstages:\n  - build\n  - triggers\n\n# This job only creates a variable \n\ncreate_var_job:\n  stage: build\n  script:\n    - var_1=\"Hi, I'm a Parent pipeline variable\"\n    - echo \"var_1=$var_1\" >> var.env\n  artifacts:\n    reports:\n      dotenv: var.env\n```\n\n\nNotice that the variable is created as part of the script steps in the job (during runtime). In this example, we are using a simple string `\"Hi, I'm a Parent pipeline variable\"` to illustrate the main syntax required to later share this variable with a child pipeline. Let's break down the `create_var_job`  and analyze the main steps from this GitLab job \n\n\nFirst, we need to save `var_1` as `dotenv`:\n\n\n```\n  script:\n    - var_1=\"Hi, I'm a pipeline variable\"\n    - echo \"var_1=$var_1\" >> var.env\n```\n\n\nAfter saving `var_1` as `var.env`, the next important step is to make this variable available as an artifact produced by the `create_var_job`. 
To do that, we use the following syntax: \n\n\n```\n\nartifacts:\n    reports:\n      dotenv: var.env\n```\n\n\nUp to this point, we have created a variable during runtime and saved it as a `dotenv` report. Now let's add the job that should trigger the child pipeline:\n\n\n```\n\ntelco_service_a:\n  stage: triggers\n  trigger:\n    include: service_a/.gitlab-ci.yml\n  rules:\n    - changes:\n        - service_a/*\n```\n\n\nThe goal of the `telco_service_a` job is to find the `.gitlab-ci.yml` configuration of the child pipeline, which in this case lives under `service_a`, and trigger its execution. Let's examine this job: \n\n\n```\n\ntelco_service_a:\n  stage: triggers\n  trigger:\n    include: service_a/.gitlab-ci.yml\n```\n\n\nWe see it belongs to another `stage` of the pipeline, named `triggers`. This job will run only after `create_var_job` from the first stage, where the variable `var_1` we want to pass is created, finishes successfully.\n\n\nAfter defining the stage, we use the reserved words `trigger` and `include` to tell GitLab where to search for the child pipeline configuration, as illustrated in the YAML below:\n\n\n```\n  trigger:\n    include: service_a/.gitlab-ci.yml\n```\n\n\nIn this example, our child-pipeline YAML configuration lives at `service_a/.gitlab-ci.yml` in the GitLab repository. \n\n\n![child-pipeline YAML configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617772/ujkirpbifthpuujkcm6f.png)\n\n\n\u003Cp>\u003C/p>\n\n\n\u003Ccenter>\u003Ci>Child pipelines folders with configurations\u003C/i>\u003C/center>\n\n\n\u003Cp>\u003C/p>\n\n\nTake into consideration that the repository structure depicted above can vary. What matters is pointing the `trigger: include` properties at the location of your child-pipeline configuration in your repository.\n\n\nFinally, we use `rules: changes` to indicate to GitLab that this child pipeline should be triggered only if there is a change to any file in the `service_a/` directory, as illustrated in the following code snippet:\n\n\n```\n\nrules:\n    - changes:\n        - service_a/*\n```\n\n\nUsing this rule helps to optimize cost by triggering the child pipeline job only when necessary. This approach is particularly valuable in a monorepo architecture where specific modules contain numerous components, allowing us to avoid running their dedicated pipelines when no changes have been made to their respective codebases.\n\n\n#### Configuring the parent pipeline \n\n\nUp to this point, we have put together our parent pipeline. 
Here's the full code snippet for this segment:\n\n```\n\n# Parent Pipeline Configuration\n\n# This pipeline creates a custom variable and triggers a child pipeline\n\n\nstages:\n  - build\n  - triggers\n\ncreate_var_job:\n  stage: build\n  script:\n    - var_1=\"Hi, I'm a Parent pipeline variable\"\n    - echo \"var_1=$var_1\" >> var.env\n  artifacts:\n    reports:\n      dotenv: var.env\n\ntelco_service_a:\n  stage: triggers\n  trigger:\n    include: service_a/.gitlab-ci.yml\n  rules:\n    - changes:\n        - service_a/*\n```\n\n\nWhen GitLab executes the YAML configuration in the GitLab UI, the parent pipeline gets rendered as follows:\n\n\n![parent pipeline rendering](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617771/e1azkkr0rnzd42dzkw1x.png)\n\n\nNotice the label \"trigger job,\" which indicates this job will start the execution of another pipeline configuration.\n\n\n#### Configuring the child pipeline \n\n\nMoving forward, let's now focus on the child pipeline configuration, where we expect to inherit and print the value of the `var_1` variable created in the parent pipeline.\n\n\nThe pipeline configuration in `service_a/.gitlab-ci.yml` has the following definition:\n\n\n```\n\nstages:\n  - build\n\nbuild_a:\n  stage: build\n  script:\n    - echo \"this job inherits the variable from the Parent pipeline:\"\n    - echo $var_1\n  needs:\n    - project: gitlab-da/use-cases/7-4-parent-child-pipeline\n      job: create_var_job\n      ref: main\n      artifacts: true\n```\n\n\nLike before, let's break down this pipeline and highlight the main parts to achieve our goal. This pipeline only contains one stage (i.e., `build`) and one job (i.e., `build_a`). The script in the job contains two steps:\n\n\n```\n\nbuild_a:\n  stage: build\n  script:\n    - echo \"this job inherits the variable from the Parent pipeline:\"\n    - echo $var_1\n```\n\n\nThese two steps print output during the execution. The most interesting one is the second step, `echo $var_1`, where we expect to print the variable value inherited from the parent pipeline. Remember, this was a simple string with value: `\"Hi, I'm a Parent pipeline variable\"`. \n\n\n#### Inheriting variables using needs\n\n\nTo set and link this job to inherit variables from the parent pipeline, we use the reserved GitLab CI keyword `needs`, as depicted in the following snippet:\n\n\n```\n\nneeds:\n    - project: gitlab-da/use-cases/7-4-parent-child-pipeline\n      job: create_var_job\n      ref: main\n      artifacts: true\n```\n\n\nUsing the \"needs\" keyword, we define dependencies that must be completed before running this job. In this case, we pass four different values. Let's walk through each one of them:\n\n\n* **Project:** The complete namespace of the project where the main `.gitlab-ci.yml` containing the parent pipeline YAML is located. Make sure to include the absolute path.  \n\n* **Job:** The specific job name in the parent pipeline from where we want to inherit the variable.  \n\n* **Ref:** The name of the branch where the main `.gitlab-ci.yml` containing the parent pipeline YAML is located.  \n\n* **Artifacts:** A boolean value indicating that artifacts from the parent pipeline job should be downloaded and made available to this child pipeline job.\n\n\n**Note:** This specific approach using the `needs` property is only available to GitLab Premium and Ultimate users. We will cover another example for GitLab community users later on. 
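\n\n\nBecause our parent and child pipelines live in the same project, a closely related variant is `needs:pipeline:job`, which lets the child job reference the parent pipeline by its ID instead of a hard-coded project path. Here is a minimal sketch, assuming the trigger job forwards its pipeline ID in a variable we have named `PARENT_PIPELINE_ID` (our own naming, not part of the example above) and that your GitLab version supports `needs:pipeline:job`:\n\n\n```\n\n# Parent pipeline: forward the current pipeline ID to the child pipeline\n\ntelco_service_a:\n  stage: triggers\n  variables:\n    PARENT_PIPELINE_ID: $CI_PIPELINE_ID\n  trigger:\n    include: service_a/.gitlab-ci.yml\n\n# Child pipeline (service_a/.gitlab-ci.yml): fetch the artifact produced by the parent job\n\nbuild_a:\n  stage: build\n  script:\n    - cat var.env   # the dotenv file created by create_var_job arrives as a downloaded artifact\n  needs:\n    - pipeline: $PARENT_PIPELINE_ID\n      job: create_var_job\n```\n\n\nVerify this variant against the GitLab documentation for your version before adopting it; the `needs: project` approach shown above is the one we use throughout this example.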
\n\n\n#### Putting it all together \n\n\nNow let's assume we make a change to any of the files under `service_a` folder and commit the changes to the repository. When GitLab detects the change, the rule we set up will trigger the child job pipeline execution. This gets displayed in the GitLab UI as follows:\n\n\n![Rule triggering the child job pipeline execution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617771/e1azkkr0rnzd42dzkw1x.png)\n\n\nClicking on the `telco_service_a`  will take us to the jobs in the child pipeline:\n\n\n![Jobs in pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617773/vftjkg7ct2wqmew1e3yk.png)\n\n\nWe can see the parent-child relationship, and finally, by clicking on the `build_a job`, we can visually verify the variable inheritance in the job execution log:\n\n\n![Verifying the variable inheritance in the job execution log](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760617758/hxfkfmev9hebbqhgcvoh.png)\n\n\nThis output confirms the behavior we expected. The custom runtime variable `var_1` created in the parent job is inherited in the child job, unpacked from the `dotenv` report, and its value accessible as can be confirmed in Line 26 above.\n\n\nThis use case illustrates how to share custom variables that can contain any value between pipelines. This example is intentionally simple and can be extrapolated to more realistic scenarios. Take, for instance, the following CI/CD configuration, where the custom variable we need to share is the tag of a Docker image:\n\n\n```\n\n# Pipeline \n\n\nbuild-prod-image:\n  tags: [ saas-linux-large-amd64 ]\n  image: docker:20.10.16\n  stage: build\n  services:\n    - docker:20.10.16-dind\n  \n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker build -t $PRODUCTION_IMAGE .\n    - docker push $PRODUCTION_IMAGE\n    - echo \"UPSTREAM_CONTAINER_IMAGE=$PRODUCTION_IMAGE\" >> prodimage.env\n\n  artifacts:\n    reports:\n      dotenv: prodimage.env\n\n  rules:\n     - if: '$CI_COMMIT_BRANCH == \"main\"'\n       when: always\n     - when: never\n```\n\n\nAnd use the variable with the Docker image tag, in another job that updates a Helm manifest file:\n\n\n```\n\nupdate-helm-values:\n    stage: update-manifests\n    image:\n        name: alpine:3.16\n        entrypoint: [\"\"]\n  \n    before_script:\n         - apk add --no-cache git curl bash yq\n         - git remote set-url origin https://${CI_USERNAME}:${GITOPS_USER}@${SERVER_PATH}/${PROJECT_PATH}\n         - git config --global user.email \"gitlab@gitlab.com\"\n         - git config --global user.name \"GitLab GitOps\"\n         - git pull origin main\n    script:\n          - cd src\n          - echo $UPSTREAM_CONTAINER_IMAGE\n          - yq eval -i \".spec.template.spec.containers[0].image |= \\\"$UPSTREAM_CONTAINER_IMAGE\\\"\" store-deployment.yaml\n          - cat store-deployment.yaml\n          - git pull origin main\n          - git checkout -B main\n          - git commit -am '[skip ci] prod image update'\n          - git push origin main\n    needs:\n      - project: gitlab-da/use-cases/devsecops-platform/simply-find/simply-find-front-end\n        job: build-prod-image\n        ref: main\n        artifacts: true\n```\n\n\nMastering how to share variables between pipelines while maintaining the relationship between them enables us to create more sophisticated workflow orchestration that can meet our software building needs. 
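\n\n\nWhen the value you want to share is already known before the pipeline runs, rather than produced by a job at runtime, you do not need a `dotenv` artifact at all: variables defined on the trigger job are forwarded to the child pipeline by default. A minimal sketch (the `DEPLOY_ENV` variable name is purely illustrative):\n\n\n```\n\ntelco_service_a:\n  stage: triggers\n  variables:\n    DEPLOY_ENV: staging   # forwarded automatically to the child pipeline\n  trigger:\n    include: service_a/.gitlab-ci.yml\n```\n\n\nThe Package Registry example in the next section relies on this same mechanism to hand the package name and version over to the child pipeline.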
\n\n\n### Using GitLab Package Registry to share artifacts\n\n\nWhile the needs feature mentioned above works great for Premium and Ultimate users, GitLab also has features to help achieve similar results for Community Edition users. One suggested approach is to store artifacts in the [GitLab Package Registry](https://docs.gitlab.com/user/packages/package_registry/). \n\n\nUsing a combination of the variables provided in GitLab CI/CD jobs and the GitLab API, you can upload artifacts to the GitLab Package Registry from a parent pipeline. In the child pipeline, you can then access the uploaded artifact from the package registry using the same variables and API to access the artifact. Let’s take a look at the example pipeline and some supplementary scripts that illustrate this:\n\n\n**gitlab-ci.yml (parent pipeline)**\n\n\n```\n\n# Parent Pipeline Configuration\n\n# This pipeline creates an artifact, uploads it to Package Registry, and triggers a child pipeline\n\n\nstages:\n  - create-upload\n  - trigger\n\nvariables:\n  PACKAGE_NAME: \"pipeline-artifacts\"\n  PACKAGE_VERSION: \"$CI_PIPELINE_ID\"\n  ARTIFACT_FILE: \"artifact.txt\"\n\n# Job 1: Create and upload artifact to Package Registry\n\ncreate-and-upload-artifact:\n  stage: create-upload\n  image: alpine:latest\n  before_script:\n    - apk add --no-cache curl bash\n  script:\n    - bash scripts/create-artifact.sh\n    - bash scripts/upload-to-registry.sh\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n\n# Job 2: Trigger child pipeline\n\ntrigger-child:\n  stage: trigger\n  trigger:\n    include: child-pipeline.yml\n    strategy: depend\n  variables:\n    PARENT_PIPELINE_ID: $CI_PIPELINE_ID\n    PACKAGE_NAME: $PACKAGE_NAME\n    PACKAGE_VERSION: $PACKAGE_VERSION\n    ARTIFACT_FILE: $ARTIFACT_FILE\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n```\n\n\n**child-pipeline.yml**\n\n\n```\n\n# Child Pipeline Configuration\n\n# This pipeline downloads the artifact from Package Registry and processes it\n\n\nstages:\n  - download-process\n\nvariables:\n  # These variables are passed from the parent pipeline\n  PACKAGE_NAME: \"pipeline-artifacts\"\n  PACKAGE_VERSION: \"$PARENT_PIPELINE_ID\"\n  ARTIFACT_FILE: \"artifact.txt\"\n\n# Job 1: Download and process artifact from Package Registry\n\ndownload-and-process-artifact:\n  stage: download-process\n  image: alpine:latest\n  before_script:\n    - apk add --no-cache curl bash\n  script:\n    - bash scripts/download-from-registry.sh\n    - echo \"Processing downloaded artifact...\"\n    - cat $ARTIFACT_FILE\n    - echo \"Artifact processed successfully!\"\n```\n\n\n**upload-to-registry.sh**\n\n\n```\n\n#!/bin/bash\n\n\nset -e\n\n\n# Configuration\n\nPACKAGE_NAME=\"${PACKAGE_NAME:-pipeline-artifacts}\"\n\nPACKAGE_VERSION=\"${PACKAGE_VERSION:-$CI_PIPELINE_ID}\"\n\nARTIFACT_FILE=\"${ARTIFACT_FILE:-artifact.txt}\"\n\n\n# Validate required variables\n\nif [ -z \"$CI_PROJECT_ID\" ]; then\n    echo \"Error: CI_PROJECT_ID is not set\"\n    exit 1\nfi\n\n\nif [ -z \"$CI_JOB_TOKEN\" ]; then\n    echo \"Error: CI_JOB_TOKEN is not set\"\n    exit 1\nfi\n\n\nif [ -z \"$CI_API_V4_URL\" ]; then\n    echo \"Error: CI_API_V4_URL is not set\"\n    exit 1\nfi\n\n\nif [ ! 
-f \"$ARTIFACT_FILE\" ]; then\n    echo \"Error: Artifact file '$ARTIFACT_FILE' not found\"\n    exit 1\nfi\n\n\n# Construct the upload URL\n\nUPLOAD_URL=\"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}/${ARTIFACT_FILE}\"\n\n\n# Upload the file using curl\n\nresponse=$(curl -w \"%{http_code}\" -o /tmp/upload_response.json \\\n    --header \"JOB-TOKEN: $CI_JOB_TOKEN\" \\\n    --upload-file \"$ARTIFACT_FILE\" \\\n    \"$UPLOAD_URL\")\n\nif [ \"$response\" -eq 201 ]; then\n    echo \"Upload successful!\"\nelse\n    echo \"Upload failed with HTTP code: $response\"\n    exit 1\nfi\n\n```\n\n\n**download-from-regsitry.sh**\n\n\n```\n\n#!/bin/bash\n\n\nset -e\n\n\n# Configuration\n\nPACKAGE_NAME=\"${PACKAGE_NAME:-pipeline-artifacts}\"\n\nPACKAGE_VERSION=\"${PACKAGE_VERSION:-$PARENT_PIPELINE_ID}\"\n\nARTIFACT_FILE=\"${ARTIFACT_FILE:-artifact.txt}\"\n\n\n# Validate required variables\n\nif [ -z \"$CI_PROJECT_ID\" ]; then\n    echo \"Error: CI_PROJECT_ID is not set\"\n    exit 1\nfi\n\n\nif [ -z \"$CI_JOB_TOKEN\" ]; then\n    echo \"Error: CI_JOB_TOKEN is not set\"\n    exit 1\nfi\n\n\nif [ -z \"$CI_API_V4_URL\" ]; then\n    echo \"Error: CI_API_V4_URL is not set\"\n    exit 1\nfi\n\n\nif [ -z \"$PACKAGE_VERSION\" ]; then\n    echo \"Error: PACKAGE_VERSION is not set\"\n    exit 1\nfi\n\n\n# Construct the download URL\n\nDOWNLOAD_URL=\"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}/${ARTIFACT_FILE}\"\n\n\n# Download the file using curl\n\nresponse=$(curl -w \"%{http_code}\" -o \"$ARTIFACT_FILE\" \\\n    --header \"JOB-TOKEN: $CI_JOB_TOKEN\" \\\n    --fail-with-body \\\n    \"$DOWNLOAD_URL\")\n\nif [ \"$response\" -eq 200 ]; then\n    echo \"Download successful!\"\nelse\n    echo \"Download failed with HTTP code: $response\"\n    exit 1\nfi\n\n```\n\n\nIn this example, the parent pipeline uploads a file to the GitLab Package Registry by calling a script named `upload-to-registry.sh`. The script gives the artifact a name and version and constructs the API call to upload the file to the package registry. The parent pipeline is able to authenticate using a `$CI_JOB_TOKEN` to push the artifact.txt file to the registry. \n\n\nThe child pipeline operates the same as the parent pipeline by using a script to construct the API call to download the artifact.txt file from the package registry. It also is able to authenticate to the registry using the `$CI_JOB_TOKEN`. \n\n\nSince the GitLab Package Registry is available to all GitLab users, it helps to serve as a central location for storing and versioning artifacts. It is a great option for users working with many kinds of artifacts and needing to version artifacts for workflows even beyond CI/CD. \n\n\n### Using inputs to pass variables to a child pipeline\n\n\nIf you made it this far in this tutorial, and you have plans to start creating new pipeline configurations, you might want to start by evaluating if your use case can benefit from using **inputs** to pass variables to other pipelines. \n\n\nUsing inputs is a recommended way to pass variables when you need to define specific values in a CI/CD job and have those values remain fixed during the pipeline run. Inputs might offer certain advantages over the method we implemented before. For example, with inputs, you can include data validation through options (i.e., values must be one of these: \\[‘staging', ‘prod’\\]), variable descriptions, type checking, and assign default values before the pipeline run. 
\n\n\n#### Configuring CI/CD inputs\n\n\nConsider the following parent pipeline configuration:\n\n\n```\n\n# .gitlab-ci.yml (main file)\n\nstages:\n  - trigger\n\ntrigger-staging:\n  stage: trigger\n  trigger:\n    include:\n      - local: service_a/.gitlab-ci.yml\n        inputs:\n          environment: staging\n          version: \"1.0.0\"\n```\n\n\nLet's zoom in on the main difference between the code snippet above and the previous parent pipeline examples in this tutorial: \n\n\n```\n\ntrigger:\n  include:\n    - local: service_a/.gitlab-ci.yml\n      inputs:\n        environment: staging\n        version: \"1.0.0\"\n```\n\n\nThe main difference is the reserved word \"inputs\". This part of the YAML configuration can be read in natural language as: “trigger the child pipeline defined in `service_a/.gitlab-ci.yml` and make sure to pass ‘environment: staging’ and ‘version: 1.0.0’ as input variables that the child pipeline will know how to use.”\n\n\n#### Reading CI/CD inputs in child pipelines\n\n\nMoving to the child pipeline, its declaration must contain a spec that defines the inputs it can take. For each input, you can add a short description, a set of predefined options the input value can take, and the type of value it accepts. This is illustrated as follows: \n\n\n```\n\n# target pipeline or child-pipeline in this case\n\n\nspec:\n  inputs:\n    environment:\n      description: \"Deployment environment\"\n      options: [staging, production]\n    version:\n      type: string\n      description: \"Application version\"\n\n\n---\n\n\nstages:\n  - deploy\n\n# Jobs that will use the inputs\n\ndeploy:\n  stage: deploy\n  script:\n    - echo \"Deploying version $[[ inputs.version ]] to $[[ inputs.environment ]]\"\n\n```\n\n\nNotice from the code snippet that after defining the spec, there is a YAML document separator \"---\" followed by the actual child pipeline definition, where we access the values `$[[ inputs.version ]]` and `$[[ inputs.environment ]]` from the defined inputs using input interpolation.\n\n\n## Get hands-on with parent-child pipelines, artifacts, and more\n\n\nWe hope this article has helped you navigate the challenge of sharing variables and artifacts in parent-child pipeline setups.\n\n\nTo try these examples for yourself, feel free to view or fork the [Premium/Ultimate](https://gitlab.com/gitlab-da/use-cases/devsecops-platform/devops-platform-wave/scenarios/scenario7-deep-dive-into-build-automation-and-ci/7-4-parent-child-pipeline/-/tree/main) and the [GitLab Package Registry](https://gitlab.com/gitlab-da/playground/dhelfand/parent-child-pipeline-with-package-registry-artifacts) examples of sharing artifacts.\n\n\nYou can also sign up for a [30-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/) to experience all the features GitLab has to offer. Thanks for reading!\n",[109,573,857],{"featured":91,"template":844,"slug":1044},"variable-and-artifact-sharing-in-gitlab-parent-child-pipelines",{"content":1046,"config":1055},{"title":1047,"description":1048,"authors":1049,"heroImage":1051,"date":1052,"body":1053,"category":780,"tags":1054},"How we built a structured Streamlit Application Framework in Snowflake","Want to transform development from chaos to compliance? 
Learn how we implemented governance early on rather than retrofitting when maintenance costs climb exponentially.",[1050],"Radovan Bacovic","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097447/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097447404.png","2025-10-10","Recently, the GitLab Data team transformed scattered\n[Streamlit](https://streamlit.io/) applications into a unified, secure, and\nscalable solution for our Snowflake environment. To accomplish this, we\npacked Python, Snowflake, and Streamlit together with GitLab. Follow along\non this journey and discover the results we achieved, and learn how you can,\ntoo.\n\n## The challenge\n\nImagine this scenario: Your organization has dozens of Streamlit applications across different environments, running various Python versions, connecting to sensitive data with inconsistent security practices. Some apps work, others break mysteriously, and nobody knows who built what or how to maintain them.\n\n\nThis was exactly the challenge our data team faced. Applications were being created in isolation, with no standardization, no security oversight, and no clear deployment process. The result? A compliance nightmare and a maintenance burden that was growing exponentially.\n\n\n![Functional architectural design (high level)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035999/i50lpkrwy9bok056rdak.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>Functional architectural design (high level)\u003C/i>\u003C/center>\n\n## How we started\n\nWe leveraged our unique position as customer zero by building this entire framework on GitLab's own CI/CD infrastructure and project management tools. Here are the ingredients we started with: \n\n1. [GitLab](https://about.gitlab.com/platform/) (product)\n\n1. [Snowflake](https://about.gitlab.com/platform/) - our single source of truth (SSOT) for the data warehouse activities (and more than that)\n\n1. [Streamlit](https://streamlit.io/) - an open-source tool for visual applications that has pure Python code under the hood\n\nThis provided us with immediate access to enterprise-grade DevSecOps capabilities, enabling us to implement automated testing, code review processes, and deployment pipelines from the outset. By utilizing GitLab's built-in features for issue tracking, merge requests, and automated deployments (CI/CD pipelines), we can iterate rapidly and validate the framework against real-world enterprise requirements. This internal-first approach ensured our solution was battle-tested on GitLab's own infrastructure before any external implementation.\n\n### The lessons we learned\n\nThe most critical lesson we learned from building the Streamlit Application Framework in Snowflake is that **structure beats chaos every time** — implement governance early rather than retrofitting it later when maintenance becomes exponential.\n\nYou also need to clearly define roles and responsibilities, separating infrastructure concerns from application development, so that each team can focus on its strengths.\n\nSecurity and compliance cannot be afterthoughts; they must be built into templates and automated processes from day one, as it's far easier to enforce consistent standards upfront than to force them after the fact. 
Invest heavily in automation and CI/CD pipelines, as manual processes don't scale and introduce human error.\n\n![Architecture of the framework (general overview)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035998/qt9gfemxjnj8kjumkuh7.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>Architecture of the framework (general overview)\u003C/i>\u003C/center>\n\n##  How the Streamlit Application Framework changes everything\n\nThe Streamlit Application Framework turns a scattered approach into a structure. It gives developers freedom within secure guardrails, while automating deployment and eliminating maintenance complexity.\n\n### Three clear roles, one unified process\n\nThe framework introduces a structured approach with three distinct roles:\n\n1. **Maintainers** (Data team members and contributors) handle the infrastructure, including CI/CD pipelines, security templates, and compliance rules. They ensure the framework runs smoothly and stays secure.\n\n2. **Creators** (those who need to build applications) can focus on what they do best: creating visualizations, connecting to Snowflake data, and building user experiences. They have full flexibility to create new applications from scratch, add new pages to existing apps, integrate additional Python libraries, and build complex data visualisations — all without worrying about deployment pipelines or security configurations.\n\n3. **Viewers** (end users) access polished, secure applications without any technical overhead. All they need is Snowflake access.\n\n![Roles overview and their functionality](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035999/oatqyx3ug7vsgzishpma.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>Overview of roles and their functions\u003C/i>\u003C/center>\n\n## Automate everything\n\nBy implementing CI/CD, days of manual deployments and configuration headaches are gone. 
The framework provides:\n\n- **One-click environment preparation:** With a set of `make` commands, the environment is installed and ready in a few seconds.\n\n\n```yaml\n\n================================================================================\n\n✅ Snowflake CLI successfully installed and configured!\n\nConnection: gitlab_streamlit\n\nUser: YOU@GITLAB.COM\n\nAccount: gitlab\n\n================================================================================\n\nUsing virtualenv: /Users/YOU/repos/streamlit/.venv\n\n📚 Installing project dependencies...\n\nInstalling dependencies from lock file\n\nNo dependencies to install or update\n\n✅ Streamlit environment prepared!\n\n```\n\n- **Automated CI/CD pipelines:** Handle testing, code review, and deployment from development to production.\n\n- **Secure sandbox environments:** Provide for safe development and testing before production deployment.\n\n\n```yaml\n\n╰─$ make streamlit-rules\n\n🔍 Running Streamlit compliance check...\n\n================================================================================\n\nCODE COMPLIANCE REPORT\n\n================================================================================\n\nGenerated: 2025-07-09 14:01:16\n\nFiles checked: 1\n\n\nSUMMARY:\n\n✅ Passed: 1\n\n❌ Failed: 0\n\nSuccess Rate: 100.0%\n\n\nAPPLICATION COMPLIANCE SUMMARY:\n\n📱 Total Applications Checked: 1\n\n⚠️ Applications with Issues: 0\n\n📊 File Compliance Rate: 100.0%\n\n\nDETAILED RESULTS BY APPLICATION:\n\n...\n\n```\n\n\n- **Template-based application creation:** Ensures consistency across all applications and pages.\n\n\n```yaml\n\n╰─$ make streamlit-new-page STREAMLIT_APP=sales_dashboard STREAMLIT_PAGE_NAME=analytics\n\n📝 Generating new Streamlit page: analytics for app: sales_dashboard\n\n📃 Create new page from template:\n\nPage name: analytics\n\nApp directory: sales_dashboard\n\nTemplate path: page_template.py\n\n✅ Successfully created 'analytics.py' in 'sales_dashboard' directory from template\n\n```\n\n\n- **Poetry-based dependency management:** Prevents version conflicts and maintains clean environments.\n\n- **Organized project structure:** Has dedicated folders for applications, templates, compliance rules, and configuration management.\n\n\n```yaml\n\n├── src/\n\n│   ├── applications/     # Folder for Streamlit applications\n\n│   │   ├── main_app/     # Main dashboard application\n\n│   │   ├── components/   # Shared components\n\n│   │   └── \u003Cyour_apps>/  # Your custom application\n\n│   │   └── \u003Cyour_apps2>/ # Your 2nd custom application\n\n│   ├── templates/        # Application and page templates\n\n│   ├── compliance/       # Compliance rules and checks\n\n│   └── setup/            # Setup and configuration utilities\n\n├── tests/                # Test files\n\n├── config.yml            # Environment configuration\n\n├── Makefile              # Build and deployment automation\n\n└── README.md             # Main README.md file\n\n```\n\n\n- **Streamlined workflow:** Takes local development through testing schema to production, all automated through GitLab CI/CD pipelines.\n\n\n![GitLab CI/CD pipelines for full automation of the process](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035998/usyma2jkgiazu9iay1au.png)\n\n\u003Cp>\u003C/p>\n\u003Ccenter>\u003Ci>GitLab CI/CD pipelines for full automation of the process\u003C/i>\u003C/center>\n\n## Security and compliance by design\n\nInstead of bolting on security as an afterthought, the structured Streamlit Application Framework builds it in 
from the ground up. Every application adheres to the same security standards, and compliance requirements are automatically enforced. Audit trails are maintained throughout the development lifecycle.\n\nWe introduce our compliance rules and verify them with a single command. For instance, we can list which classes and methods are mandatory to use, which files you should have, and which roles are allowed and which are forbidden to share the application with. The rules are flexible and descriptive; all you need to do is define them in a YAML file:\n\n\n```yaml\n\nclass_rules:\n  - name: \"Inherit code for the page from GitLabDataStreamlitInit\"\n    description: \"All Streamlit apps must inherit from GitLabDataStreamlitInit\"\n    severity: \"error\"\n    required: true\n    class_name: \"*\"\n    required_base_classes:\n      - \"GitLabDataStreamlitInit\"\n    required_methods:\n      - \"__init__\"\n      - \"set_page_layout\"\n      - \"setup_ui\"\n      - \"run\"\n\nfunction_rules:\n  - name: \"Main function required\"\n    description: \"Must have a main() function\"\n    severity: \"error\"\n    required: true\n    function_name: \"main\"\n\nimport_rules:\n  - name: \"Import GitLabDataStreamlitInit\"\n    description: \"Must import the mandatory base class\"\n    severity: \"error\"\n    required: true\n    module_name: \"gitlab_data_streamlit_init\"\n    required_items:\n      - \"GitLabDataStreamlitInit\"\n  - name: \"Import streamlit\"\n    description: \"Must import streamlit library\"\n    severity: \"error\"\n    required: true\n    module_name: \"streamlit\"\n\nfile_rules:\n  - name: \"Snowflake configuration required (snowflake.yml)\"\n    description: \"Each application must have a snowflake.yml configuration file\"\n    severity: \"error\"\n    required: true\n    file_pattern: \"**/applications/**/snowflake.yml\"\n    base_path: \"\"\n  - name: \"Snowflake environment required (environment.yml)\"\n    description: \"Each application must have a environment.yml configuration file\"\n    severity: \"error\"\n    required: true\n    file_pattern: \"**/applications/**/environment.yml\"\n    base_path: \"\"\n  - name: \"Share specification required (share.yml)\"\n    description: \"Each application must have a share.yml file\"\n    severity: \"warning\"\n    required: true\n    file_pattern: \"**/applications/**/share.yml\"\n    base_path: \"\"\n  - name: \"README.md required (README.md)\"\n    description: \"Each application should have a README.md file with a proper documentation\"\n    severity: \"error\"\n    required: true\n    file_pattern: \"**/applications/**/README.md\"\n    base_path: \"\"\n  - name: \"Starting point recommended (dashboard.py)\"\n    description: \"Each application must have a dashboard.py as a starting point\"\n    severity: \"warning\"\n    required: true\n    file_pattern: \"**/applications/**/dashboard.py\"\n    base_path: \"\"\n\nsql_rules:\n  - name: \"SQL files must contain only SELECT statements\"\n    description: \"SQL files and SQL code in other files should only contain SELECT statements for data safety\"\n    severity: \"error\"\n    required: true\n    file_extensions: [\".sql\", \".py\"]\n    select_only: true\n    forbidden_statements:\n      - ....\n    case_sensitive: false\n  - name: \"SQL queries should include proper SELECT statements\"\n    description: \"When SQL is present, it should contain proper SELECT statements\"\n    severity: \"warning\"\n    required: false\n    file_extensions: [\".sql\", \".py\"]\n    
required_statements:\n      - \"SELECT\"\n    case_sensitive: false\n\nshare_rules:\n  - name: \"Valid functional roles in share.yml\"\n    description: \"Share.yml files must contain only valid functional roles from the approved list\"\n    severity: \"error\"\n    required: true\n    file_pattern: \"**/applications/**/share.yml\"\n    valid_roles:\n      - ...\n    safe_data_roles:\n      - ...\n  - name: \"Share.yml file format validation\"\n    description: \"Share.yml files must follow the correct YAML format structure\"\n    severity: \"error\"\n    required: true\n    file_pattern: \"**/applications/**/share.yml\"\n    required_keys:\n      - \"share\"\n    min_roles: 1\n    max_roles: 10\n```\n\n\nWith one command running:\n\n\n```bash\n\n╰─$ make streamlit-rules\n\n```\n\n\nWe can verify all the rules we have created and validate that the developers (who are building a Streamlit application) are following the policy specified by the creators (who determine the policies and building blocks of the framework), and that all the building blocks are in the right place. This ensures consistent behavior across all Streamlit applications.\n\n\n```yaml\n\n🔍 Running Streamlit compliance check...\n\n================================================================================\n\nCODE COMPLIANCE REPORT\n\n================================================================================\n\nGenerated: 2025-08-18 17:05:12\n\nFiles checked: 4\n\n\nSUMMARY:\n\n✅ Passed: 4\n\n❌ Failed: 0\n\nSuccess Rate: 100.0%\n\n\nAPPLICATION COMPLIANCE SUMMARY:\n\n📱 Total Applications Checked: 1\n\n⚠️ Applications with Issues: 0\n\n📊 File Compliance Rate: 100.0%\n\n\nDETAILED RESULTS BY APPLICATION:\n\n================================================================================\n\n✅ PASS APPLICATION: main_app\n\n------------------------------------------------------------\n\n📁 FILES ANALYZED (4):\n\n✅ dashboard.py\n\n📦 Classes: SnowflakeConnectionTester\n\n🔧 Functions: main\n\n📥 Imports: os, pwd, gitlab_data_streamlit_init, snowflake.snowpark.exceptions, streamlit\n\n\n✅ show_streamlit_apps.py\n\n📦 Classes: ShowStreamlitApps\n\n🔧 Functions: main\n\n📥 Imports: pandas, gitlab_data_streamlit_init, snowflake_session, streamlit\n\n\n✅ available_packages.py\n\n📦 Classes: AvailablePackages\n\n🔧 Functions: main\n\n📥 Imports: pandas, gitlab_data_streamlit_init, streamlit\n\n\n✅ share.yml\n\n👥 Share Roles: snowflake_analyst_safe\n\n\n📄 FILE COMPLIANCE FOR MAIN_APP:\n\n✅ Required files found:\n\n✓ snowflake.yml\n\n✓ environment.yml\n\n✓ share.yml\n\n✓ README.md\n\n✓ dashboard.py\n\n\nRULES CHECKED:\n\n----------------------------------------\n\nClass Rules (1):\n\n- Inherit code for the page from GitLabDataStreamlitInit (error)\n\n\nFunction Rules (1):\n\n- Main function required (error)\n\n\nImport Rules (2):\n\n- Import GitLabDataStreamlitInit (error)\n\n- Import streamlit (error)\n\n\nFile Rules (5):\n\n- Snowflake configuration required (snowflake.yml) (error)\n\n- Snowflake environment required (environment.yml) (error)\n\n- Share specification required (share.yml) (warning)\n\n- README.md required (README.md) (error)\n\n- Starting point recommended (dashboard.py) (warning)\n\n\nSQL Rules (2):\n\n- SQL files must contain only SELECT statements (error)\n\n🗄 SELECT-only mode enabled\n\n🚨 Forbidden: INSERT, UPDATE, DELETE, DROP, ALTER...\n\n- SQL queries should include proper SELECT statements (warning)\n\n\nShare Rules (2):\n\n- Valid functional roles in share.yml (error)\n\n👥 Valid roles: 15 roles defined\n\n🔒 
Safe data roles: 11 roles\n\n- Share.yml file format validation (error)\n\n------------------------------------------------------------\n\n✅ Compliance check passed\n\n-----------------------------------------------------------\n\n```\n\n\n## Developer experience that works\n\nWhether you prefer your favorite IDE, a web-based development environment, or Snowflake Snowsight, the experience remains consistent. The framework provides:\n\n- **Template-driven development:** New applications and pages are created through standardized templates, ensuring consistency and best practices from day one. No more scattered design and elements.\n\n\n```yaml\n\n╰─$ make streamlit-new-app NAME=sales_dashboard\n\n🔧 Configuration Environment: TEST\n\n📝 Configuration File: config.yml\n\n📜 Config Loader Script: ./setup/get_config.sh\n\n🐍 Python Version: 3.12\n\n📁 Applications Directory: ./src/applications\n\n🗄 Database: ...\n\n📊 Schema: ...\n\n🏗 Stage: ...\n\n🏭 Warehouse: ...\n\n🆕 Creating new Streamlit app: sales_dashboard\n\nInitialized the new project in ./src/applications/sales_dashboard\n\n```\n\n\n- **Poetry package management:** All dependencies are managed through Poetry, creating isolated environments that won't disrupt your existing Python setup.\n\n\n```toml\n\n[tool.poetry]\n\nname = \"GitLab Data Streamlit\"\n\nversion = \"0.1.1\"\n\ndescription = \"GitLab Data Team Streamlit project\"\n\nauthors = [\"GitLab Data Team \u003C*****@gitlab.com>\"]\n\nreadme = \"README.md\"\n\n\n[tool.poetry.dependencies]\n\npython = \"\u003C3.13,>=3.12\"\n\nsnowflake-snowpark-python = \"==1.32.0\"\n\nsnowflake-connector-python = {extras = [\"development\", \"pandas\", \"secure-local-storage\"], version = \"^3.15.0\"}\n\nstreamlit = \"==1.22.0\"\n\nwatchdog = \"^6.0.0\"\n\ntypes-toml = \"^0.10.8.20240310\"\n\npytest = \"==7.0.0\"\n\nblack = \"==25.1.0\"\n\nimportlib-metadata = \"==4.13.0\"\n\npyyaml = \"==6.0.2\"\n\npython-qualiter = \"*\"\n\nruff = \"^0.1.0\"\n\ntypes-pyyaml = \"^6.0.12.20250516\"\n\njinja2 = \"==3.1.6\"\n\n\n[build-system]\n\nrequires = [\"poetry-core\"]\n\nbuild-backend = \"poetry.core.masonry.api\"\n\n```\n\n- **Multi-page application support:** Creators can easily build complex applications with multiple pages and add new libraries as needed. 
Multi-page applications are part of the framework and a developer is focusing on the logic, not the design and structuring.\n\n\n![Multipage application example (in Snowflake)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035999/at1q2xgmjthkrgju4okm.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>Multipage application example (in Snowflake)\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\n- **Seamless Snowflake integration:** Built-in connectors and authentication handling for secure data access provide the same experience, whether in local development or directly in Snowflake.\n\n\n```yaml\n\nmake streamlit-push-test APPLICATION_NAME=sales_dashboard\n\n📤 Deploying Streamlit app to test environment: sales_dashboard\n\n...\n\n------------------------------------------------------------------------------------------------------------\n\n🔗 Running share command for application: sales_dashboard\n\nRunning commands to grant shares\n\n🚀 Executing: snow streamlit share sales_dashboard with SOME_NICE_ROLE\n\n✅ Command executed successfully\n\n📊 Execution Summary: 1/1 commands succeeded\n\n```\n\n- **Comprehensive Makefile:** All common commands are wrapped in simple Makefile commands, from local development to testing and deployment, including CI/CD pipelines.\n\n\n- **Safe local development:** Everything runs in isolated Poetry environments, protecting your system while providing production-like experiences.\n\n\n![Same experience despite the environment (example of the local development)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760035999/phmubsb34hn2mfefjvqh.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>Same experience despite the environment (example of the local development)\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\n- **Collaboration via code:** All applications and components are wrapped up in one repository, which allows the entire organization to collaborate on the same resources and avoid double work and redundant setup.\n\n## How you can get started\n\nIf you're facing similar challenges with scattered Streamlit applications, here's how to begin and move quickly:\n\n1. **Assess your current state:** Inventory your existing applications and identify pain points.\n\n2. **Define your roles:** Separate maintainer responsibilities from creator and end users' needs.\n\n3. **Start with templates:** Create standardized application templates that enforce your security and compliance requirements.\n\n4. **Implement CI/CD:** Automate your deployment pipeline to reduce manual errors and ensure consistency.\n\n\n![Deploy the application in Snowflake](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760036003/mzge9s1fhkhnx38y1a3i.png)\n\n\u003Cp>\u003C/p>\n\n\u003Ccenter>\u003Ci>The application deployed in Snowflake\u003C/i>\u003C/center>\n\n\n## The bigger picture\n\n\nThis framework represents more than just a technical solution — it's a paradigm shift toward treating data applications as first-class citizens in your enterprise (data) architecture.\n\nBy providing structure without sacrificing flexibility, the GitLab Data team created an environment where anyone in the company with minimal technical knowledge can innovate rapidly while maintaining the highest standards of security and compliance.\n\n\n### What's next?\n\nWe're continuing to enhance the framework based on user feedback and emerging needs. 
Future improvements include expanded template libraries, enhanced monitoring capabilities, more flexibility, and a smoother user experience.\n\n**The goal isn't just to solve today's problems, but to create a foundation that scales with your organization's growing data application needs.**\n\n## Summary\n\n[The GitLab Data Team](https://handbook.gitlab.com/handbook/enterprise-data/) transformed dozens of scattered, insecure Streamlit applications with no standardization into a unified, enterprise-grade framework that separates roles cleanly:\n\n\n1. **Maintainers** handle infrastructure and security.\n\n2. **Creators** focus on building applications without deployment headaches.\n\n3. **Viewers** access polished, compliant apps.\n\n\nAnd we used these building blocks:\n\n1. Automated **CI/CD** pipelines\n\n2. Fully collaborative and versioned code in **git**\n\n3. **Template-based** development\n\n4. Built-in **security** compliance, testing\n\n5. **Poetry-managed** environments\n\nWe eliminated the maintenance nightmare while enabling rapid innovation — proving that you can have both structure and flexibility when you treat data applications as first-class enterprise assets rather than throwaway prototypes.\n",[812,856,822,857],{"featured":6,"template":844,"slug":1056},"how-we-built-a-structured-streamlit-application-framework-in-snowflake",{"content":1058,"config":1068},{"title":1059,"description":1060,"authors":1061,"heroImage":1063,"date":1064,"body":1065,"category":780,"tags":1066},"How GitLab transforms embedded systems testing cycles","Discover how managed lifecycle environments streamline and automate virtual testing, delivering rapid feedback without environment sprawl or cost overruns.",[1021,1062],"Darwin Sanoy","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099203/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2820%29_2bJGC5ZP3WheoqzlLT05C5_1750099203484.png","2025-10-02","Embedded developers know this cycle well: write code, wait days or weeks to test on a hardware test bench, discover bugs, fix them, then wait again. Virtual testing environments promise faster feedback, but most implementations create new problems such as environment sprawl and escalating costs.\n\nGitLab's managed lifecycle environments solve these virtual testing challenges. Through virtual environment automation, GitLab accelerates embedded development cycles without the configuration complexity and cost overruns.\n\n## Virtual testing challenges\n\nVirtual testing environments — simulated hardware setups that replicate embedded system behavior and real-world conditions — offer the potential to reduce hardware bottlenecks. Teams can test firmware on simulated processors, run model-in-the-loop (MIL) tests in MATLAB/Simulink, or verify software on virtual embedded systems without waiting for physical hardware access.\n\nHowever, teams often implement virtual environments using one of two common approaches, both of which create unsustainable challenges.\n\n### Flawed approach 1: Pipeline lifecycle environments\n\n**Pipeline lifecycle environments re-create the entire testing setup for every CI/CD run.** When code changes trigger your CI/CD pipeline, the system provisions infrastructure, installs software simulations, and configures everything from scratch before running tests.\n\nThis approach works for simple scenarios but becomes inefficient as complexity rises. Consider software-in-the-loop (SIL) testing in a complex virtual environment, for example. 
Each pipeline run requires complete environment re-creation, including virtual processor provisioning, toolchain installations, and target configurations. **These processes can eat up considerable time.**\n\nMoreover, as embedded systems require more sophisticated virtual hardware configurations, the provisioning **costs quickly add up.**\n\nTo avoid these rebuild costs and delays, many teams turn to long-lived environments that persist between test runs. But they come with downsides. \n\n### Flawed approach 2: Long-lived environments\n\n**Long-lived environments persist indefinitely** to avoid constant rebuilding. Developers request these environments from IT or DevOps teams, wait for approval, then need someone to manually provision the infrastructure. These environments are then tied to individual developers/teams rather than specific code changes, and they support ongoing development work across multiple projects.\n\nWhile this eliminates rebuild overhead, **it creates environment sprawl.** Environments accumulate without a clear termination date. Infrastructure costs climb as environments consume resources indefinitely.\n\nLong-lived environments also suffer from **\"config rot\"** — environments retain settings, cached data, or software versions from previous tests that can affect subsequent results. A test that should fail ends up passing due to the residue of previous testing. \n\nUltimately, managing long-lived environments is a manual process that slows development velocity and increases operational overhead. \n\n**GitLab offers a third approach** through “managed lifecycle environments.” This approach captures the benefits of both long-lived and pipeline lifecycle environments while avoiding the drawbacks.\n\n## Solution: Managed lifecycle environments\n\nGitLab's managed lifecycle environments tie virtual testing setups to merge requests ([MRs](https://docs.gitlab.com/user/project/merge_requests/)) rather than pipeline runs or individual developers. You can also think of them as “managed MR test environments.” When you create an MR for a new feature, GitLab automatically orchestrates the provisioning of necessary virtual testing environments. These environments persist throughout the entire feature development process.\n\n### Key benefits\n\n* **Persistent environments without rebuilding:** The same virtual environment handles multiple pipeline runs as you iterate on your feature. Whether you're running MIL tests in MATLAB/Simulink or SIL tests on specialized embedded processors, the environment remains configured and ready.\n\n* **Automatic cleanup:** When you merge your feature and delete the branch, GitLab automatically triggers environment cleanup, eliminating environment sprawl.\n\n* **Single source of truth:** The MR records all build results, test outcomes, and environment metadata in one location. Team members can track progress and collaborate without shuffling between different tools or spreadsheets.\n\nWatch this overview video to see how managed lifecycle environments work in practice:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9tfyVPK5DuI?si=Kj_xXNo02bnFBDhy\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab automates the entire testing workflow. Each time you run firmware tests, GitLab orchestrates testing in the appropriate virtual environment, records results, and provides full visibility into every pipeline run. 
This approach transforms complex virtual testing from a manual, error-prone process into automated, reliable workflows.\n\n**The result:** Teams get reusable environments without runaway costs. And they increase efficiency while maintaining clean, isolated testing setups for each feature.\n\nSee a demonstration of managed lifecycle environments for testing firmware on virtual hardware:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iWdY-kTlpH4?si=D6rpoulr9sv6Sl6E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n\n## Business impact\n\nGitLab's managed lifecycle environments deliver measurable improvements across embedded development workflows. Teams running MIL testing in MATLAB/Simulink and SIL testing on specialized processors like Infineon AURIX or BlackBerry QNX systems no longer face the tradeoff between constant environment rebuilds or uncontrolled environment sprawl. Instead, these complex virtual testing setups persist throughout feature development while automatically cleaning up when complete, enabling:\n\n* Faster product development cycles  \n* Shorter time-to-market  \n* Lower infrastructure costs  \n* Higher quality assurance\n\n## Start transforming virtual testing today\n\n[**Download “Unlocking agility and avoiding runaway costs in embedded development”**](https://learn.gitlab.com/embedded-en/whitepaper-unlocking-agility-embedded-development) for a deeper exploration of managed lifecycle environments and learn how to accelerate embedded development workflows dramatically.\n",[812,1026,1067],"testing",{"featured":91,"template":844,"slug":1069},"how-gitlab-transforms-embedded-systems-testing-cycles",{"category":787,"slug":791,"posts":1071},[1072,1084,1095],{"content":1073,"config":1082},{"title":1074,"description":1075,"heroImage":1076,"authors":1077,"date":1079,"body":1080,"category":791,"tags":1081},"GitLab named a Leader in the 2025 Gartner Magic Quadrant for AI Code Assistants","GitLab recognized again as a Leader in the 2025 Gartner® Magic Quadrant™ for AI Code Assistants, for vision and execution.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1757675943/q9kb7zfiw1cyxx9fcafp.png",[1078],"Manav Khurana","2025-09-17","GitLab has been recognized for the second time as a Leader in the 2025 Gartner® Magic Quadrant™ for AI Code Assistants. We see this recognition as validation of a key pillar in our broader AI strategy, where intelligent code assistance evolves into comprehensive AI that transforms how entire teams plan, build, secure, and deploy software.\n![2025 Gartner® Magic Quadrant™ for AI Code Assistants](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758121248/jfkmhddve6qvlg79xico.png)\n> [Download the report.](https://about.gitlab.com/gartner-mq-ai-code-assistants/)\n## From AI features to intelligent collaboration \n\nThe Gartner evaluation, we feel, focused on GitLab Duo's generative AI code assistance capabilities. While GitLab Duo began as an AI add-on to the GitLab DevSecOps platform, it laid the groundwork for where we are going today with agentic AI built natively into the GitLab DevSecOps platform. \n\nGitLab Duo Agent Platform enables developers to work alongside multiple AI agents that automate tasks across the software lifecycle. Agents collaborate with each other and with humans, using GitLab’s Knowledge Graph to act with full project context. 
This empowers teams to move faster while keeping visibility and control. \n\n* **Specialized agents** handle tasks such as code generation, security analysis, and research in parallel. \n\n* **Knowledge Graph** connects agents to a unified system of record across code, issues, pipelines, and compliance data. \n\n* **Human + agent collaboration** happens through natural-language chat and customizable flows, with review and oversight built in. \n\n* **Interoperability with external tools and systems** is supported through Model Context Protocol (MCP) and agent-to-agent frameworks. \n\nWith agents handling routine work under human guidance, teams can move faster, focus on higher-value tasks, and keep projects secure and compliant. \n\n## Secure by design, flexible in practice\n\nThe GitLab Duo Agent Platform is designed to keep security and compliance front and center. Agents run inside GitLab’s trusted DevSecOps environment, with every action visible and reviewable before changes are made. Secure integrations help ensure credentials and sensitive data are handled safely, while interoperability through open standards connects agents to external tools without exposing an organization to risk. \n\nThe platform gives teams confidence that AI is enhancing productivity without compromising governance. Here's how: \n\n* **Developers** can stay focused on complex, high-impact work, while handing off routine tasks to agents for faster results and more granular context delivered through their existing workflows. \n\n* **Engineering leaders** gain visibility into how work moves across the lifecycle, with agents operating within clear guardrails. They also can ensure their teams stay aligned to priorities and simplify onboarding with guided support through agent-driven context and workflows.  \n\n* **IT organizations** maintain control over agent activity with governance features that enforce coding and security policies, offer model selection flexibility, and ensure secure interoperability — all while keeping humans in the loop. \n\n## Leading the move to AI-native development\n\nGitLab continues to build on the vision that began with Duo, and will continue to expand GitLab Duo Agent Platform with new agents, advanced workflows, and more orchestration capabilities. This commitment to innovation ensures you can amplify team productivity on the platform you know and trust. Stay tuned for exciting updates on our roadmap as we continue to revolutionize AI-native DevSecOps. \n\n> [Download the 2025 Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/gartner-mq-ai-code-assistants/) and [try GitLab Duo Agent Platform today](https://about.gitlab.com/gitlab-duo/agent-platform/).\n\n*Source: Gartner, Magic Quadrant for AI Code Assistants, Philip Walsh, Haritha Khandabattu, Matt Brasier, Keith Holloway, Arun Batchu, 15 September 2025* \n\n*GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally, and MAGIC QUADRANT is a registered trademark of Gartner, Inc. and/or its affiliates and are used herein with permission. All rights reserved.* \n\n*Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose.* \n\n*This graphic was published by Gartner Inc. as part of a larger report and should be evaluated in the context of the entire document. The Gartner document is available upon request from Gartner B.V.*",[812,791,573],{"featured":91,"template":844,"slug":1083},"gitlab-named-a-leader-in-the-2025-gartner-magic-quadrant-for-ai-code-assistants",{"config":1085,"content":1087},{"featured":6,"template":844,"slug":1086},"gitlab-and-accenture-announce-global-reseller-agreement",{"title":1088,"description":1089,"category":791,"tags":1090,"authors":1091,"heroImage":1092,"date":1093,"body":1094},"GitLab and Accenture announce Global Reseller Agreement","New reseller agreement empowers Accenture to offer GitLab's comprehensive DevSecOps platform.",[791,812,573],[929],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1751568278/bots3gyfarx8qysbkw6c.png","2025-09-15","We're excited to announce that GitLab and Accenture have signed a global reseller agreement, establishing Accenture as an authorized GitLab reseller and Professional Services Provider. This agreement enables Accenture to provide GitLab's complete DevSecOps platform directly to customers through multiple fulfillment channels, including the AWS Marketplace.\n\n## A milestone in collaboration\n\nThis collaboration combines GitLab's comprehensive, intelligent DevSecOps platform with Accenture's extensive expertise in digital transformation and implementation services, enabling organizations to build and deliver secure software at scale. The global reseller agreement provides a global framework that can be easily adapted to local conditions.\n\nThe collaboration will initially focus on several key areas:\n\n1. **Enterprise-scale DevSecOps Transformation:** Helping organizations modernize their development practices and streamline their software delivery lifecycle  \n2. **Mainframe Modernization:** Assisting customers with migrating from legacy systems  \n3. **GitLab Duo with Amazon Q:** Offering AI-driven software development to organizations looking to accelerate development velocity while maintaining end-to-end security and compliance\n\n## Looking ahead\n\nWe’re looking forward to helping our joint customers accelerate innovation, streamline development processes, and strengthen their security posture to achieve their business objectives more effectively.\n\nFor more information about how GitLab and Accenture can help your organization, please [visit our partner site](https://about.gitlab.com/partners/channel-partners/#/2328213) or contact your Accenture or GitLab representative.",{"content":1096,"config":1106},{"title":1097,"description":1098,"authors":1099,"heroImage":1101,"date":1102,"body":1103,"category":791,"tags":1104},"GitLab at Next '25: Transforming app modernization","GitLab participated in Google Cloud Next ‘25 and received a fifth consecutive Google Cloud Technology Partner of the Year recognition.",[1100],"Regnard Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663121/Blog/Hero%20Images/LogoLockupPlusLight.png","2025-04-11","GitLab's presence at Google Cloud Next '25 highlighted our strong partnership with Google Cloud and our joint commitment to accelerating software development and delivery. 
We were recognized again as a Technology Partner of the Year, and included in key enterprise initiatives like Google Distributed Cloud (GDC) Build Partners and [Startup Perks from Google Cloud](https://cloud.google.com/blog/topics/startups/why-global-startups-are-gathering-at-google-cloud-next25?e=13802955). Our team members demonstrated for attendees how GitLab is positioned to be a critical DevSecOps service for Google Cloud customers.\n\n## Continuing our award-winning partnership excellence\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175937/Blog/nempa4yvfutedz3fpuxx.jpg\" alt=\"GitLab team at Google Cloud Next '25\" align=\"left\" width=\"400px\" style=\"padding-right: 20px; padding-bottom: 10px\"/>\n\nWe're thrilled to announce that GitLab has once again been named a [Google Cloud Technology Partner of the Year award winner](https://about.gitlab.com/press/releases/2025-04-08-gitlab-wins-a-google-cloud-technology-partner-of-the-year-award-for-devops/), marking our fifth consecutive time receiving this prestigious honor. This remarkable achievement reaffirms our position as Google Cloud's primary DevOps partner, consistently delivering exceptional value year after year. The continued recognition highlights how our collaboration with Google Cloud creates tangible business outcomes for customers, enabling organizations across industries to build, secure, and deploy applications with efficiency and confidence.\n\n## Google Distributed Cloud: DevSecOps for highly regulated environments\n\nAnother significant milestone announced at Next '25 was GitLab's \"Google Cloud Ready - Distributed Cloud\" certification. This designation enables organizations to implement GitLab in air-gapped environments, addressing critical security and compliance requirements.\n\nAs an end-to-end DevSecOps solution available on Google Distributed Cloud, GitLab enables sovereign development and operations for workloads critical to national security and regulatory compliance. This integration is particularly valuable for government agencies and financial institutions that require the highest levels of data sovereignty while maintaining modern development practices.\n\n## GitLab perks for Google Startups\n\nGitLab is a Featured Partner of the new Startup Perks program from Google Cloud. This partnership ties up with our own [GitLab for Startups](https://about.gitlab.com/solutions/startups/google-cloud/) and is meant to jumpstart new tech ventures with key DevSecOps capabilities that can help with fast growth and scaling.\n\nAs one of the [Featured Perks partners](https://cloud.google.com/startup/perks), eligible startups can get free or discounted access to one year of [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) for 20 licenses. 
For seed or early stage startups, this benefit can help ensure collaboration, efficiency, and security without sacrificing speed and agility.\n\n## Thoughts from the dais\n\nGitLab experts shared valuable insights across multiple speaking sessions at Next '25, delivering practical knowledge on AI-powered DevSecOps, platform engineering, and cloud application delivery:\n\n* __[AI DevOps panel](https://cloud.withgoogle.com/next/25/session-library?session=BRK2-163&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Mike Flouton, GitLab Vice President of Product Management, joined industry leaders to discuss how AI code assist tools boost productivity while enhancing application performance.\n\n* __[Software Logistics - The Missing Link in Modern Platform Engineering](https://cloud.withgoogle.com/next/25/session-library?session=CT2-16&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ GitLab Field CTO Lee Faus explored how effective software logistics create the foundation for successful platform engineering initiatives.\n\n* __[Revolutionizing Cloud Application Delivery with Intelligent Agents](https://cloud.withgoogle.com/next/25/session-library?session=CT2-17&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Faus also demonstrated how intelligent agents are transforming cloud application delivery pipelines.\n\n## Engaging attendees across Next '25\n\nIn addition to our speaking sessions, GitLab maintained a strong presence throughout Next '25. At our booth #2170 on the expo floor, our team engaged with hundreds of attendees through demonstrations and lightning talks featuring both GitLab experts and partners like Arctiq and SADA.\n\nThe Google Cloud Makerspace's Dev Tools Pantry became a hub of innovation and collaboration. John Coghlan, Director of Developer Advocacy, observed: \"It was great to connect with many GitLab and Google Cloud customers in the Dev Tools Pantry in the Makerspace. We loved seeing the creative solutions that people came up with around developer experience and simplified deployments using GitLab and Google Cloud as their ingredients.\"\n\nThese hands-on experiences showcased how GitLab's DevSecOps solutions integrate well with Google Cloud services, with our AI-powered capabilities demonstrations drawing particular interest from attendees looking to enhance developer productivity and application security.\n\n## GitLab and Google Cloud: Transforming the future together\n\nThe energy witnessed at Next '25 exemplifies why GitLab and Google Cloud make such powerful partners. 
Together, we help organizations to transform how they build, secure, and deploy applications through:\n\n* AI-assisted development capabilities and collaborative workflows that can help accelerate innovation in Google Cloud environments\n\n* Shift-left security approach that integrates with Google Cloud's security-first architecture to identify vulnerabilities early in the development lifecycle\n\n* Flexible deployment options and comprehensive observability that work harmoniously with Google Cloud infrastructure to help streamline operations\n\nAs demonstrated at Next '25, the GitLab and Google Cloud partnership delivers tangible advantages for development teams facing real-world challenges – whether accelerating AI adoption, strengthening security in regulated environments, or streamlining complex deployment pipelines. The technical integration points and customer success stories shared throughout the event underscore that this collaboration continues to produce practical solutions that matter.\n\n> #### Discover how GitLab and Google Cloud can transform your application development experience at [GitLab's Google Cloud partnership page](https://about.gitlab.com/partners/technology-partners/google-cloud-platform/).",[1105,542,278,283,791],"google",{"slug":1107,"featured":6,"template":844},"gitlab-at-next-25-transforming-app-modernization",{"category":798,"slug":802,"posts":1109},[1110,1123,1136],{"content":1111,"config":1121},{"title":1112,"description":1113,"authors":1114,"heroImage":1116,"date":1117,"body":1118,"category":802,"tags":1119},"What’s new in Git 2.51.0?","Learn about the latest contributions from GitLab's Git team and the Git community, including performance optimizations for git-push(1) and git-fetch(1).",[1115],"Karthik Nayak","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663087/Blog/Hero%20Images/git3-cover.png","2025-08-18","The Git project recently released [Git 2.51](https://lore.kernel.org/git/xmqqikikk1hr.fsf@gitster.g/T/#u).\nDue to summer in the Northern Hemisphere and slower progress, this\nrelease cycle was on the shorter side of 8 weeks (typically a release cycle\nlasts about 12 weeks). Let’s look at some notable changes in this release,\nincluding contributions from the Git team at GitLab and also the wider Git\ncommunity.\n\n\n## Performance optimizations for `git-push(1)` and `git-fetch(1)`\n\n\nThe `git-push(1)` and `git-fetch(1)` commands allow users to synchronize local and remote repositories. Part of the operation involves updating references in the repository. In repositories with many references, this can take significant time, especially for users who work with large development environments, monorepos, or repositories with extensive CI/CD pipelines.\n\nGit reference transactions can include multiple reference updates, but they follow an all-or-nothing approach. If any single update within the transaction fails, the entire transaction fails and none of the reference updates are applied. But reference updates as part of `git-push(1)` and `git-fetch(1)` are allowed to fail, which allows repositories to synchronize a subset of references even in the case where a different subset has diverged. To facilitate this behavior, Git creates a separate transaction for each reference update, allowing some transactions to fail while the rest succeed. 
\n\nCreating a separate transaction per update incurs significant overhead, as each transaction includes an initiation and teardown phase and also checks for whether there are conflicting reference names. The “reftable” backend also performs auto-compaction at the end of a transaction, so multiple transactions would trigger multiple auto-compactions, which would drastically increase the latency of the command. \n\nIn Git 2.51.0, these commands now use batched updates instead of separate transactions. Batched updates allow updating multiple references under a single transaction, while still allowing some updates to fail. This removes the overhead and scales better with the number of references to be updated, since only a single transaction is used. This significantly improves the performance of the “reftable” backend, which now outperforms the “files” backend. Users can reap these performance improvements without needing to make any changes.\n\nFor `git-fetch(1)` we see a *22x performance improvement for the “reftable” backend* and *1.25x improvement for the “files” backend* when used in a repository with 10,000 references.\n\n\n```\n\nBenchmark 1: fetch: many refs (refformat = reftable, refcount = 10000, revision = master)\n  Time (mean ± σ):      3.403 s ±  0.775 s    [User: 1.875 s, System: 1.417 s]\n  Range (min … max):    2.454 s …  4.529 s    10 runs\n\nBenchmark 2: fetch: many refs (refformat = reftable, refcount = 10000, revision = HEAD)\n  Time (mean ± σ):     154.3 ms ±  17.6 ms    [User: 102.5 ms, System: 56.1 ms]\n  Range (min … max):   145.2 ms … 220.5 ms    18 runs\n\nSummary\n  fetch: many refs (refformat = reftable, refcount = 10000, revision = HEAD) ran\n   22.06 ± 5.62 times faster than fetch: many refs (refformat = reftable, refcount = 10000, revision = master)\n\nBenchmark 1: fetch: many refs (refformat = files, refcount = 10000, revision = master)\n  Time (mean ± σ):     605.5 ms ±   9.4 ms    [User: 117.8 ms, System: 483.3 ms]\n  Range (min … max):   595.6 ms … 621.5 ms    10 runs\n\nBenchmark 2: fetch: many refs (refformat = files, refcount = 10000, revision = HEAD)\n  Time (mean ± σ):     485.8 ms ±   4.3 ms    [User: 91.1 ms, System: 396.7 ms]\n  Range (min … max):   477.6 ms … 494.3 ms    10 runs\n\nSummary\n  fetch: many refs (refformat = files, refcount = 10000, revision = HEAD) ran\n    1.25 ± 0.02 times faster than fetch: many refs (refformat = files, refcount = 10000, revision = master)\n```\n\n\nFor `git-push(1)` we see a *18x performance improvement for the reftable backend* and *1.21x improvement for the “files” backend* when used in a repository with 10,000 references.\n\n\n```\n\nBenchmark 1: push: many refs (refformat = reftable, refcount = 10000, revision = master)\n  Time (mean ± σ):      4.276 s ±  0.078 s    [User: 0.796 s, System: 3.318 s]\n  Range (min … max):    4.185 s …  4.430 s    10 runs\n\nBenchmark 2: push: many refs (refformat = reftable, refcount = 10000, revision = HEAD)\n  Time (mean ± σ):     235.4 ms ±   6.9 ms    [User: 75.4 ms, System: 157.3 ms]\n  Range (min … max):   228.5 ms … 254.2 ms    11 runs\n\nSummary\n  push: many refs (refformat = reftable, refcount = 10000, revision = HEAD) ran\n   18.16 ± 0.63 times faster than push: many refs (refformat = reftable, refcount = 10000, revision = master)\n\nBenchmark 1: push: many refs (refformat = files, refcount = 10000, revision = master)\n  Time (mean ± σ):      1.121 s ±  0.021 s    [User: 0.128 s, System: 0.975 s]\n  Range (min … max):    1.097 s …  1.156 s    10 runs\n\nBenchmark 2: 
push: many refs (refformat = files, refcount = 10000, revision = HEAD)\n  Time (mean ± σ):     927.9 ms ±  22.6 ms    [User: 99.0 ms, System: 815.2 ms]\n  Range (min … max):   903.1 ms … 978.0 ms    10 runs\n\nSummary\n  push: many refs (refformat = files, refcount = 10000, revision = HEAD) ran\n    1.21 ± 0.04 times faster than push: many refs (refformat = files, refcount = 10000, revision = master)\n```\n\n\nThis [project](https://lore.kernel.org/git/20250514-501-update-git-fetch-1-to-use-partial-transactions-v1-0-7c65f46493d4@gmail.com/) was led by [Karthik Nayak](https://gitlab.com/knayakgl).\n\n\n## Planning towards Git 3.0\n\n\n11 years ago, Git 2.0 was released, which was the last major version release of Git. While we don’t have a specific timeline for the next major Git release, this release includes decisions made towards Git 3.0.\n\n\nThe Git 3.0 release planning allows us to plan for and implement breaking changes and communicate them to the extended Git community. Next to documentation, Git can also be compiled with these breaking changes for those who want to experiment with these changes. More information can be found in the [BreakingChanges document](https://gitlab.com/gitlab-org/git/-/blob/master/Documentation/BreakingChanges.adoc). \n\n\nThe Git 2.51.0 release makes some significant changes towards Git 3.0. \n\n\n### Reftable as the default reference backend\n\n\nIn the [Git 2.45.0](https://gitlab.com/gitlab-org/git/-/blob/master/Documentation/RelNotes/2.45.0.adoc?ref_type=heads) release, the “reftable” format was introduced as a new backend for storing references like branches or tags in Git, which fixes many of the issues with the existing \"files\" backend. Please read our [beginner's guide to how reftables work](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/) for more insight into the “reftable” backend.\n\n\nThe Git 2.51.0 release marks the switch to using the \"reftable\" format as default in Git 3.0 for newly created repositories and also wires up the change behind a feature flag. The “reftable” format provides the following improvements over the traditional “files” backend:\n\n\n* It is impossible to store two references that only differ in casing on case-insensitive filesystems with the \"files\" format. This issue is common on Windows and macOS platforms. As the \"reftable\" backend does not use filesystem paths to encode reference names this problem goes away.\n\n* Similarly, macOS normalizes path names that contain unicode characters, which has the consequence that you cannot store two names with unicode characters that are encoded differently with the \"files\" backend. Again, this is not an issue with the \"reftable\" backend.\n\n* Deleting references with the \"files\" backend requires Git to rewrite the complete \"packed-refs\" file. In large repositories with many references this file can easily be dozens of megabytes in size; in extreme cases it may be gigabytes. The \"reftable\" backend uses tombstone markers for deleted references and thus does not have to rewrite all of its data.\n\n* Repository housekeeping with the \"files\" backend typically performs all-into-one repacks of references. This can be quite expensive, and consequently housekeeping is a tradeoff between the number of loose references that accumulate and slow down operations that read references, and compressing those loose references into the \"packed-refs\" file. 
The \"reftable\" backend uses geometric compaction after every write, which amortizes costs and ensures that the backend is always in a well-maintained state.\n\n* Operations that write multiple references at once are not atomic with the \"files\" backend. Consequently, Git may see in-between states when it reads references while a reference transaction is in the process of being committed to disk.\n\n* Writing many references at once is slow with the \"files\" backend because every reference is created as a separate file. The \"reftable\" backend significantly outperforms the \"files\" backend by multiple orders of magnitude.\n\n* The “reftable” backend uses a binary format with prefix compression for reference names. As a result, the format uses less space compared to the \"packed-refs\" file.\n\n\nThis project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab).\n\n\n### SHA-256 as the default hash function\n\n\nThe Git version control system stores objects in a content-addressable filesystem. This means it uses the hash of an object to address content such as files, directories, and revisions, unlike traditional filesystems, which use sequential numbers. Using a hash function has the following advantages: \n\n\n* Easy integrity checks as a single bit flip would change the hash output completely.\n\n* Fast object lookup as objects can be indexed by their hash.\n\n* Object names can be signed and third parties can trust the hash to address the signed object and all objects it references.\n\n* Communication using Git protocol and out of band communication methods have a short reliable string that can be used to reliably address stored content.\n\n\nSince its inception, Git has used the SHA-1 hashing algorithm. However, security researchers have discovered some flaws in SHA-1, specifically the [SHAttered attack](https://shattered.io), which shows a practical SHA-1 hash collision. We moved to using a hardened SHA-1 implementation by default since Git 2.13.0. However, SHA-1 is still a weak hashing algorithm and it is only a matter of time before additional attacks will further reduce its security.\n\n\nSHA-256 was identified as the successor to SHA-1 in late 2018. Git 2.51.0 marks it as the default hash algorithm to be used in Git 3.0.\n\n\nThis project was led by [brian m. carlson](https://github.com/bk2204).\n\n\n### Removal of `git-whatchanged(1)`\n\n\nThe `git-whatchanged(1)` command shows logs with differences each commit introduces. While this is now succeeded by `git log --raw`, the command was kept around for historical reasons. \n\n\nGit 2.51.0 requires users of the command to explicitly use the `--i-still-use-this` flag to capture any users who still use the deprecated command, and also marks the command for removal in Git 3.0. \n\n\nThis project was led by [Junio C Hamano](https://simple.wikipedia.org/wiki/Junio_Hamano).\n\n\n## `git switch` and `git restore` are no longer experimental\n\n\nThe `git-checkout(1)` command can be used for multiple different use cases. 
It can be used for switching references:\n\n\n```\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master'.\n\nnothing to commit, working tree clean\n\n$ git checkout next\nSwitched to branch 'next'\nYour branch is up to date with 'origin/next'.\n```\n\n\nOr for restoring files:\n\n\n```\n$ echo \"additional line\" >> git.c\n\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master’.\n\nChanges not staged for commit:\n  (use \"git add \u003Cfile>...\" to update what will be committed)\n  (use \"git restore \u003Cfile>...\" to discard changes in working directory)\n    modified:   git.c\n\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\n\n$ git checkout git.c\nUpdated 1 path from the index\n\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master’.\n\nnothing to commit, working tree clean\n```\n\n\nFor new users of Git, this can cause a lot of confusion. So in Git 2.33.0, these were split into two new commands, `git-switch(1)` and `git-restore(1)`.\n\nThe `git-switch(1)` command allows users to switch to a specific branch: \n\n\n```\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master'.\n\nnothing to commit, working tree clean\n\n$ git switch next\nSwitched to branch 'next'\nYour branch is up to date with 'origin/next'.\n```\n\n\nAnd the `git-restore(1)` command allows users to restore working tree files: \n\n\n```\n$ echo \"additional line\" >> git.c\n\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master’.\n\nChanges not staged for commit:\n  (use \"git add \u003Cfile>...\" to update what will be committed)\n  (use \"git restore \u003Cfile>...\" to discard changes in working directory)\n    modified:   git.c\n\nno changes added to commit (use \"git add\" and/or \"git commit -a\")\n\n$ git restore git.c\n\n$ git status\nOn branch master\nYour branch is up to date with 'origin/master’.\n\nnothing to commit, working tree clean\n```\n\n\nWhile the two commands have existed since 2019, they were marked as experimental. The effect is that the Git project doesn’t guarantee backwards compatibility for those commands: the behavior may change at any point in time. While the intent originally was to stabilize those commands after a couple of releases, this hasn’t happened up to this point.\n\nThis has led to several discussions on the Git mailing list where users are unsure whether they can start using these new commands, or whether they might eventually go away again. But given that no significant changes have ever been proposed, and that some users are already using these commands, we have decided to no longer declare them as experimental in Git 2.51.\n\nThis project was led by [Justin Tobler](https://gitlab.com/justintobler).\n\n\n## `git for-each-ref(1)` receives pagination support\n\n\nThe `git for-each-ref` command is used to list all references present in the repository. As it is part of the plumbing layer of Git, this command is frequently used for example by hosting forges to list references that exist in the repository in their UI. But as repositories grow, it becomes less realistic to list all references at once – after all, the largest repositories may contain millions of them! So instead, forges tend to paginate the references.\n\n\nThis surfaces an important gap: `git-for-each-ref` does not know to skip references from previous pages that have already been shown. 
Consequently, it may have to list a large number of uninteresting references before it finally starts to yield the references required for the current page. This is inefficient and leads to higher-than-necessary latency or even timeouts.\n\n\nGit 2.51.0 supports a new `--start-after` flag for `git for-each-ref`, which allows paginating the output. This can also be combined with the `--count` flag to iterate over a batch of references. \n\n\n```\n$ git for-each-ref --count=10\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-001\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-002\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-003\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-004\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-005\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-006\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-007\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-008\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-009\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-010\n\n$ git for-each-ref --count=10 --start-after=refs/heads/branch-010\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-011\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-012\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-013\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-014\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-015\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-016\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-017\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-018\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-019\n9751243fba48b34d29aabfc9784803617a806e81 commit    refs/heads/branch-020\n```\n\n\nThis project was led by [Karthik Nayak](https://gitlab.com/knayakgl).\n\n\n## What's next?\n\n\nReady to experience these improvements? Update to Git 2.51.0 and start using `git switch` and `git restore` in your daily workflow. \n\n\nFor GitLab users, these performance enhancements will automatically improve your development experience once your Git version is updated.\n\n\nLearn more in the [official Git 2.51.0 release notes](https://lore.kernel.org/git/xmqqikikk1hr.fsf@gitster.g/T/#u) and explore our [complete archive of Git development coverage](https://about.gitlab.com/blog/tags/git/).\n",[1120,960,268],"git",{"featured":6,"template":844,"slug":1122},"what-s-new-in-git-2-51-0",{"content":1124,"config":1134},{"title":1125,"description":1126,"body":1127,"authors":1128,"heroImage":1131,"category":802,"tags":1132,"date":1133},"How we use GitLab to grow open source communities","Learn how to use the DevSecOps platform to solve onboarding problems for new contributors.","GitLab's Contributor Success team faced a challenge.\n\nWhile our returning open source contributors were merging more code changes and collaborating on deeper features, first-time contributors were struggling to get started. We knew many newcomers to open source often gave up or never asked for help. 
But as advocates for [GitLab's mission](https://handbook.gitlab.com/handbook/company/mission/)\n\nto enable everyone to contribute, we wanted to do better.\n\n\nWe started running research studies on open source contributors to GitLab. Then we improved the stumbling blocks. In January, we achieved a record of 184 unique community contributors to GitLab in a single month,\n\nexceeding our team target of 170 for the first time.\n\n\nThree months later, we broke it again with 192.\n\n\nHere's how we used GitLab's own tools to solve the newcomer dilemma and grow our open source community.\n\n\n## What we learned studying first-time contributors\n\n\nIn 2023, we conducted the first-ever user study of GitLab open source contributors.\n\nWe watched six participants who had never contributed to GitLab make their first attempt. They completed diary studies and Zoom interviews detailing their experience.\n\n\nParticipants told us:\n\n\n* The contributor documentation was confusing\n\n* Getting started felt overwhelming\n\n* It wasn't clear how or where to find help\n\n\nOnly one out of the six participants successfully merged a code contribution to GitLab during the study.\n\n\nIt became clear we needed to focus on the onboarding experience if we wanted new contributors to succeed.\n\nSo we [iterated](https://handbook.gitlab.com/handbook/values/#iteration)!\n\n\nOur team spent the next year addressing their challenges. We used GitLab tools,\n\nsuch as issue templates, scheduled pipelines, webhooks, and the GitLab Query Language (GLQL), to build an innovative semi-automated onboarding solution.\n\n\nIn 2025, we performed a follow-up user study with new participants who had never made a contribution to GitLab. All 10 participants successfully created and merged contributions to GitLab, a 100% success rate. 
The feedback showed a great appreciation for the new onboarding process, the speed at which\n\nmaintainers checked in on contributors, and the recognition we offered to contributors.\n\n\nEven better, participants shared how much fun they had contributing:\n\n\"I felt a little rush of excitement at being able to say 'I helped build GitLab.'\"\n\n\n## We built personal onboarding with GitLab\n\n\nOur solution started with engagement.\n\nTo help newcomers get started, we introduced a personal onboarding process connecting each\n\ncontributor with a community maintainer.\n\n\nWe created an [issue template](https://gitlab.com/gitlab-community/meta/-/blob/ac0e5579a6a1cf26e367010bfcf6c7d35b38d4f8/.gitlab/issue_templates/Onboarding.md) with a clear checklist of tasks.\n\n\nThe onboarding issue also handles access approval for the\n\n[GitLab community forks](https://about.gitlab.com/blog/gitlab-community-forks/),\n\na collection of shared projects that make it easier to push changes, collaborate with others,\n\nand access GitLab Ultimate and Duo features.\n\n\nUsing [scoped labels](https://docs.gitlab.com/user/project/labels/#scoped-labels), we indicate the status of the access request for easy maintainer follow-ups.\n\n\n![GitLab onboarding issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/vkiyl0hrfbgcer3nz38r.png)\n\n\nWe started with a Ruby script run via a [scheduled pipeline](https://docs.gitlab.com/ci/pipelines/schedules/),\n\nchecking for new access requests and using the issue template to create personalized onboarding issues.\n\n\nFrom here, our maintainers engage with new contributors to verify access, answer questions, and find issues.\n\n\n## We standardized responses with comment templates\n\n\nWith multiple maintainers in the GitLab community, we wanted to ensure consistent and clear messaging.\n\n\nWe created [comment templates](https://docs.gitlab.com/user/profile/comment_templates/),\n\nwhich we sync with the repository using the GraphQL API and a\n\n[Ruby script](https://gitlab.com/gitlab-community/meta/-/blob/dd6e0c2861c848251424b72e3e8c5603dcaac725/bin/sync_comment_templates.rb).\n\n\nThe script is triggered in `.gitlab-ci.yml` when comment template changes are pushed\n\nto the default branch (a dry run is triggered in merge requests).\n\n\n```yaml\n\nexecute:sync-comment-templates:\n  stage: execute\n  extends: .ruby\n  script:\n    - bundle exec bin/sync_comment_templates.rb\n  variables:\n    SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN: $SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN_READ_ONLY\n  rules:\n    - if: $CI_PIPELINE_SOURCE == 'schedule' || $CI_PIPELINE_SOURCE == \"trigger\"\n      when: never\n    - if: $EXECUTE_SYNC_COMMENT_TEMPLATES == '1'\n    - if: $CI_MERGE_REQUEST_IID\n      changes:\n        - .gitlab/comment_templates/**/*\n      variables:\n        REPORT_ONLY: 1\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      changes:\n        - .gitlab/comment_templates/**/*\n      variables:\n        FORCE_SYNC: 1\n        DRY_RUN: 0\n        SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN: $SYNC_COMMENT_TEMPLATES_GITLAB_API_TOKEN_READ_WRITE\n```\n\n\n\n\n![GitLab comment template](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512803/qmfaymqhq3zgdcnm6a3j.png)\n\n\n\n\n## We eliminated the 5-minute wait time\n\n\nOur first iteration was a little slow.\n\nAfter starting the onboarding process, contributors wondered what to do next while the scheduled\n\npipeline took up to 5 minutes to create their onboarding issue.\n\nFive minutes 
feels like forever when you have the momentum to dive in.\n\n\n[Niklas](https://gitlab.com/Taucher2003), a member of our [Core team ](https://about.gitlab.com/community/core-team/), built a solution.\n\nHe added [webhook events for access requests](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/163094)\n\nand [custom payload templates for webhooks](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/142738).\n\n\nThese features together allowed us to trigger a pipeline immediately instead of waiting for the schedule.\n\nThis reduces the time to roughly 40 seconds (the time it takes for the CI pipeline to run)\n\nand generates the onboarding issue right away. It also saves thousands of wasted pipelines and compute minutes when no access requests actually need processing.\n\n\nWe set up a [pipeline trigger token](https://docs.gitlab.com/ci/triggers/#create-a-pipeline-trigger-token)\n\nand used this as the target for the webhook, passing the desired environment variables:\n\n\n```json\n\n{\n  \"ref\": \"main\",\n  \"variables\": {\n    \"EXECUTE_ACCESS_REQUESTS\": \"1\",\n    \"DRY_RUN\": \"0\",\n    \"PIPELINE_NAME\": \"Create onboarding issues\",\n    \"GROUP_ID\": \"{{group_id}}\",\n    \"EVENT_NAME\": \"{{event_name}}\"\n  }\n}\n\n```\n\n\n![Pipeline list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512805/qom7hnqnwfcdzvria7dd.png)\n\n\n## We automated follow-ups\n\n\nWith an increasing volume of customers and community contributors onboarding to the GitLab community,\n\nmaintainers struggled to track which issues needed attention and some follow-up questions got lost.\n\n\nWe built automation leveraging webhooks and Ruby to label issues updated by community members.\n\nThis creates a clear signal of issue status for maintainers.\n\n\n[GitLab Triage](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage)\n\nautomatically nudges idle onboarding issues to ensure we maintain contributor momentum.\n\n\n![Automated nudge for idle GitLab onboarding issues](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512811/gkj3qaidjl1vv2dlu8ep.png)\n\n\n## We organized issue tracking with GLQL\n\n\nWe built a [GLQL view](https://docs.gitlab.com/user/glql/) to keep track of issues.\n\nThis GLQL table summarizes onboarding issues which need attention,\n\nso maintainers can review and follow up with community members.\n\n\n![GLQL view of issue tracking](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/hdduf0orntdfhkysheae.png)\n\n\nThese GLQL views improved our overall triage [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency).\n\nIt was so successful we ended up using this strategy within the [GitLab for Open Source](https://about.gitlab.com/solutions/open-source/)\n\nand [GitLab for Education](https://about.gitlab.com/solutions/education/) programs, too.\n\nWith GLQL tables for support issues, these community programs lowered their response times by 75%.\n\n\n## We made the README findable\n\n\nThe [@gitlab-community group](https://gitlab.com/gitlab-community/)\n\nis the home for contributors on Gitlab.com.\n\nWe already had a `README.md` file explaining the community forks and onboarding process, but this file\n\nlived in our meta project.\n\nWith our follow-up user study, we discovered this was a point of confusion for newcomers when their\n\nonboarding issues were under a different project.\n\n\nWe used [GitLab's project mirroring](https://docs.gitlab.com/user/project/repository/mirror/)\n\nto solve this and mirrored the meta 
project to `gitlab-profile`.\n\nThis surfaced the existing README file at the group level, making it easier to discover.\n\n\n![GitLab project mirroiring](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512809/kbgdxyilza71kmj0aeqt.png)\n\n\n![Group README](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512804/taosgn8vvgo8onszuwaf.png)\n\n\n## The results speak for themselves\n\n\nBy dogfooding GitLab, we improved the stumbling blocks found in our research studies\n\nand transformed the GitLab contributor journey.\n\nWe have grown the number of customers and community members contributing to GitLab,\n\nadding features to the product, solving bugs, and adding to our CI/CD catalog.\n\n\nOur onboarding process has increased the rate newcomers join the community, and our total number of\n\ncontributors on the community forks has doubled over the last 9 months.\n\n\n![Community forks growth chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512803/xagra4vfsrhbcwnzekmp.png)\n\n\nWe reduced the time it takes for newcomers to make their first contribution by connecting them\n\nwith maintainers faster and supporting them in getting started.\n\nWe use [GitLab's value stream analytics](https://docs.gitlab.com/user/group/value_stream_analytics/)\n\nto track our response rates.\n\n\n* First response time from community maintainers is down to 46 minutes over the last 3 months\n\n* Average approval time for community forks access is down to 1 hour over the last 3 months\n\n\n![Value stream analytics timeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752512812/jzksakrfdb22hooqemzh.png)\n\n\nThe 100% success rate of our 2025 user study confirmed these improvements for our first-time contributors.\n\n\n## We invested time savings into contributor recognition\n\n\nFixing these newcomer challenges allowed us more capacity to focus on better recognition of\n\ncontributors, incentivizing first-timers to keep coming back.\n\nThe result is [contributors.gitlab.com](https://contributors.gitlab.com/).\n\nWe built out a central hub for our contributors that features gamified leaderboards,\n\nachievements, and rewards.\n\nContributors can see their impact, track progress, and grow in the community.\n\n\n## Sharing what we learned\n\n\nThese improvements work and are repeatable for other open source projects.\n\nWe are sharing our approach across communities and conferences so that other projects can consider using these tools to grow.\n\n\nAs more organizations learn the barriers to participation, we can create a more welcoming open source environment.\n\nWith these GitLab tools, we can offer a smoother experience for both contributors and maintainers.\n\nWe're committed to advancing this work and collaborating to remove barriers for open source projects everywhere.\n\n\n## Start the conversation\n\n\nWant to learn more about growing your contributor community?\n\nEmail `contributors@gitlab.com` or [open an issue](https://gitlab.com/gitlab-org/developer-relations/contributor-success/team-task/-/issues)\n\nto start a discussion.\n\nWe're here to help build communities.\n",[1129,1130],"Lee Tickett","Daniel 
Murphy","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099558/Blog/Hero%20Images/Blog/Hero%20Images/gitlabflatlogomap_gitlabflatlogomap.png_1750099558369.png",[960,268,812],"2025-07-15",{"featured":6,"template":844,"slug":1135},"how-we-use-gitlab-to-grow-open-source-communities",{"content":1137,"config":1145},{"title":1138,"description":1139,"authors":1140,"heroImage":1116,"body":1142,"date":1143,"category":802,"tags":1144},"What’s new in Git 2.50.0?","Here are contributions from GitLab's Git team and the Git community such as the git-diff-pairs(1) command and git-update-ref(1) option to perform batched reference updates.",[1141],"Justin Tobler","The Git project recently released [Git Version\n2.50.0](https://lore.kernel.org/git/xmqq1prj1umb.fsf@gitster.g/T/#u). Let's\nlook at a few notable highlights from this release, which includes\ncontributions from the Git team at GitLab and also the wider Git community.\n\n## New git-diff-pairs(1) command\n\n\nDiffs are at the heart of every code review and show all the changes made\n\nbetween two revisions. GitLab shows diffs in various places, but the most\n\ncommon place is a merge request's [\"Changes\"\ntab](https://docs.gitlab.com/user/project/merge_requests/changes/).\n\nBehind the scenes, diff generation is powered by\n\n[`git-diff(1)`](https://git-scm.com/docs/git-diff). For example:\n\n\n```shell\n\n$ git diff HEAD~1 HEAD\n\n```\n\n\nThis command returns the full diff for all changed files. This might pose a\nscalability challenge because the number of files changed between a set of\nrevisions could be very large and cause the command to reach self-imposed\ntimeouts for the GitLab backend. For large change sets, it would be better\nif\n\nthere were a way to break diff computation into smaller, more digestible\nchunks.\n\n\nOne way this can be achieved is by using\n\n[`git-diff-tree(1)`](https://git-scm.com/docs/git-diff-tree) to retrieve\ninfo\n\nabout all the changed files:\n\n\n```shell\n\n$ git diff-tree -r -M --abbrev HEAD~ HEAD\n\n:100644 100644 c9adfed339 99acf81487 M     \nDocumentation/RelNotes/2.50.0.adoc\n\n:100755 100755 1047b8d11d 208e91a17f M      GIT-VERSION-GEN\n\n```\n\n\nGit refers to this output as the [\"raw\"\nformat](https://git-scm.com/docs/git-diff-tree#_raw_output_format).\n\nIn short, each line of output lists filepairs and the accompanying metadata\n\nabout what has changed between the start and end revisions. Compared to\n\ngenerating the \"patch\" output for large changes, this process is relatively\n\nquick and provides a summary of everything that changed. This command can\noptionally perform rename detection by  appending the `-M` flag to check if\nidentified changes were due to a file rename.\n\n\nWith this information, we could use `git-diff(1)` to compute each of the\n\nfilepair diffs individually. For example, we can provide the blob IDs\n\ndirectly:\n\n\n```shell\n\n$ git diff 1047b8d11de767d290170979a9a20de1f5692e26\n208e91a17f04558ca66bc19d73457ca64d5385f\n\n```\n\n\nWe can repeat this process for each of the filepairs, but spinning up a\n\nseparate Git process for each individual file diff is not very efficient.\n\nFurthermore, when using blob IDs, the diff loses some contextual information\n\nsuch as the change status, and file modes which are stored in with the\nparent\n\ntree object. 
What we really want is a mechanism to feed \"raw\" filepair info\nand\n\ngenerate the corresponding patch output.\n\n\nWith the 2.50 release, Git has a new built-in command named\n\n[`git-diff-pairs(1)`](https://git-scm.com/docs/git-diff-pairs). This command\n\naccepts \"raw\" formatted filepair info as input on stdin to determine exactly\nwhich patches to output. The following example showcases how this command\ncould be\n\nused:\n\n\n```shell\n\n$ git diff-tree -r -z -M HEAD~ HEAD | git diff-pairs -z\n\n```\n\n\nWhen used in this manner, the resulting output is identical to using\n`git-diff(1)`.\n\nBy having a separate command to generate patch output, the \"raw\" output from\n\n`git-diff-tree(1)` can be broken up into smaller batches of filepairs and\nfed to separate\n\n`git-diff-pairs(1)` processes. This solves the previously mentioned\nscalability\n\nconcern because diffs no longer have to be computed all at once. Future\nGitLab\n\nreleases could build upon this mechanism to improve diff\n\ngeneration performance, especially in cases where large change sets are\n\nconcerned. For more information on this change, check out the corresponding\n\n[mailing-list\nthread](https://lore.kernel.org/git/20250228213346.1335224-1-jltobler@gmail.com/).\n\n\n_This project was led by [Justin Tobler](https://gitlab.com/justintobler)._\n\n\n## Batched reference updates\n\n\nGit provides the\n[`git-update-ref(1)`](https://git-scm.com/docs/git-update-ref)\n\ncommand to perform reference updates. When used with the `--stdin` flag,\n\nmultiple reference updates can be batched together in a single transaction\nby\n\nspecifying instructions for each reference update to be performed on stdin.\n\nBulk updating references in this manner also provides atomic behavior\nwhereby a\n\nsingle reference update failure results in an aborted transaction and no\n\nreferences being updated. Here is an example showcasing this behavior:\n\n\n```shell\n\n# Create repository with three empty commits and branch named \"foo\"\n\n$ git init\n\n$ git commit --allow-empty -m 1\n\n$ git commit --allow-empty -m 2\n\n$ git commit --allow-empty -m 3\n\n$ git branch foo\n\n\n# Print out the commit IDs\n\n$ git rev-list HEAD\n\ncf469bdf5436ea1ded57670b5f5a0797f72f1afc\n\n5a74cd330f04b96ce0666af89682d4d7580c354c\n\n5a6b339a8ebffde8c0590553045403dbda831518\n\n\n# Attempt to create a new reference and update existing reference in\ntransaction.\n\n# Update is expected to fail because the specified old object ID doesn’t\nmatch.\n\n$ git update-ref --stdin \u003C\u003CEOF\n\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518\n5a74cd330f04b96ce0666af89682d4d7580c354c\n\n> EOF\n\nfatal: cannot lock ref 'refs/heads/foo': is at\ncf469bdf5436ea1ded57670b5f5a0797f72f1afc but expected\n5a74cd330f04b96ce0666af89682d4d7580c354c\n\n\n# The \"bar\" reference was not created.\n\n$ git switch bar\n\nfatal: invalid reference: bar\n\n```\n\n\nCompared to updating many references individually, updating in bulk is also\n\nmuch more efficient. While this works well, there might be certain\n\ncircumstances where it is okay for a subset of the requested reference\nupdates\n\nto fail, but we still want to take advantage of the efficiency gains of bulk\n\nupdates.\n\n\nWith this release, `git-update-ref(1)` has the new `--batch-updates` option,\n\nwhich allows the updates to proceed even when one or more reference updates\n\nfails. 
In this mode, individual failures are reported in the following\nformat:\n\n\n```text\n\nrejected SP (\u003Cold-oid> | \u003Cold-target>) SP (\u003Cnew-oid> | \u003Cnew-target>) SP\n\u003Crejection-reason> LF\n\n```\n\n\nThis allows successful reference updates to proceed while providing context\nto\n\nwhich updates were rejected and for what reason. Using the same example\n\nrepository from the previous example:\n\n\n```shell\n\n# Attempt to create a new reference and update existing reference in\ntransaction.\n\n$ git update-ref --stdin --batch-updates \u003C\u003CEOF\n\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518\n5a74cd330f04b96ce0666af89682d4d7580c354c\n\n> EOF\n\nrejected refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518\n5a74cd330f04b96ce0666af89682d4d7580c354c incorrect old value provided\n\n\n# The \"bar\" reference was created even though the update to \"foo\" was\nrejected.\n\n$ git switch bar\n\nSwitched to branch 'bar'\n\n```\n\n\nThis time, with the `--batch-updates` option, the reference creation\nsucceeded\n\neven though the update didn't work. This patch series lays the groundwork\nfor\n\nfuture performance improvements in `git-fetch(1)` and `git-receive-pack(1)`\n\nwhen references are updated in bulk. For more information, check the\n\n[mailing-list\nthread](https://lore.kernel.org/git/20250408085120.614893-1-karthik.188@gmail.com/)\n\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n\n## New filter option for git-cat-file(1)\n\n\nWith [`git-cat-file(1)`](https://git-scm.com/docs/git-cat-file), it is\npossible\n\nto print info for all objects contained in the repository via the\n\n`--batch–all-objects` option. For example:\n\n\n```shell\n\n# Setup simple repository.\n\n$ git init\n\n$ echo foo >foo\n\n$ git add foo\n\n$ git commit -m init\n\n\n# Create an unreachable object.\n\n$ git commit --amend --no-edit\n\n\n# Use git-cat-file(1) to print info about all objects including unreachable\nobjects.\n\n$ git cat-file --batch-all-objects --batch-check='%(objecttype)\n%(objectname)'\n\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\n\ntree 205f6b799e7d5c2524468ca006a0131aa57ecce7\n\nblob 257cc5642cb1a054f08cc83f2d943e56fd3ebe99\n\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n\n```\n\n\nIn some situations, a user might want to search through all objects in the\n\nrepository, but only output a subset based on some specified attribute. For\n\nexample, if we wanted to see only the objects that are commits, we could use\n\n`grep(1)`:\n\n\n```shell\n\n$ git cat-file --batch-all-objects --batch-check='%(objecttype)\n%(objectname)' | grep ^commit\n\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\n\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n\n```\n\n\nWhile this works, one downside with filtering the output is that\n\n`git-cat-file(1)` still has to traverse all the objects in the repository,\neven\n\nthe ones that the user is not interested in. This can be rather inefficient.\n\n\nWith this release, `git-cat-file(1)` now has the `--filter` option, which\nonly\n\nshows objects matching the specified criteria. This is similar to the option\nof\n\nthe same name for `git-rev-list(1)`, but with only a subset of the filters\n\nsupported. The supported filters are `blob:none`, `blob:limit=`, as well as\n\n`object:type=`. 
Similar to the previous example, objects can be filtered by\n\ntype with Git directly:\n\n\n```shell\n\n$ git cat-file --batch-all-objects --batch-check='%(objecttype)\n%(objectname)' --filter='object:type=commit'\n\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\n\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n\n```\n\n\nNot only is it convenient for Git to handle the processing, for large\n\nrepositories with many objects, it is also potentially more efficient. If a\n\nrepository has bitmap indices, it becomes possible for Git to efficiently\n\nlookup objects of a specific type, and thus avoid scanning through the\n\npackfile, which leads to a significant speedup. Benchmarks conducted on the\n\n[Chromium repository](https://github.com/chromium/chromium.git) show\n\nsignificant improvements:\n\n\n```text\n\nBenchmark 1: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --no-filter\n   Time (mean ± σ):     82.806 s ±  6.363 s    [User: 30.956 s, System: 8.264 s]\n   Range (min … max):   73.936 s … 89.690 s    10 runs\n\nBenchmark 2: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --filter=object:type=tag\n   Time (mean ± σ):      20.8 ms ±   1.3 ms    [User: 6.1 ms, System: 14.5 ms]\n   Range (min … max):    18.2 ms …  23.6 ms    127 runs\n\nBenchmark 3: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --filter=object:type=commit\n   Time (mean ± σ):      1.551 s ±  0.008 s    [User: 1.401 s, System: 0.147 s]\n   Range (min … max):    1.541 s …  1.566 s    10 runs\n\nBenchmark 4: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --filter=object:type=tree\n   Time (mean ± σ):     11.169 s ±  0.046 s    [User: 10.076 s, System: 1.063 s]\n   Range (min … max):   11.114 s … 11.245 s    10 runs\n\nBenchmark 5: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --filter=object:type=blob\n   Time (mean ± σ):     67.342 s ±  3.368 s    [User: 20.318 s, System: 7.787 s]\n   Range (min … max):   62.836 s … 73.618 s    10 runs\n\nBenchmark 6: git cat-file --batch-check --batch-all-objects --unordered\n--buffer --filter=blob:none\n   Time (mean ± σ):     13.032 s ±  0.072 s    [User: 11.638 s, System: 1.368 s]\n   Range (min … max):   12.960 s … 13.199 s    10 runs\n\nSummary\n   git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tag\n    74.75 ± 4.61 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=commit\n   538.17 ± 33.17 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tree\n   627.98 ± 38.77 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=blob:none\n  3244.93 ± 257.23 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=blob\n  3990.07 ± 392.72 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --no-filter\n```\n\n\nInterestingly, these results indicate that the computation time now scales\nwith\n\nthe number of objects for a given type instead of the number of total\nobjects\n\nin the packfile. 
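\n\nThe remaining filters follow the same pattern. As a rough sketch against a hypothetical repository (the size threshold below is arbitrary, and the size syntax is assumed to mirror the `git-rev-list(1)` option of the same name), `blob:none` drops blobs from the output entirely, while `blob:limit=` keeps only blobs below a given size:\n\n\n```shell\n\n# Omit all blobs; only commits, trees, and tags are printed.\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)' --filter='blob:none'\n\n# Omit large blobs, keeping only those smaller than about 1 KiB.\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname) %(objectsize)' --filter='blob:limit=1k'\n\n```\n\n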
The original mailing-list thread can be found\n\n[here](https://lore.kernel.org/git/20250221-pks-cat-file-object-type-filter-v1-0-0852530888e2@pks.im/).\n\n\n_This project was led by [Patrick\nSteinhardt](https://gitlab.com/pks-gitlab)._\n\n\n## Improved performance when generating bundles\n\n\nGit provides a means to generate an archive of a repository which contains a\n\nspecified set of references and accompanying reachable objects via the\n\n[`git-bundle(1)`](https://git-scm.com/docs/git-bundle) command. This\noperation\n\nis used by GitLab to generate repository backups and also as part of the\n\n[bundle-URI](https://git-scm.com/docs/bundle-uri) mechanism.\n\n\nFor large repositories containing millions of references, this operation can\n\ntake hours or even days. For example, with the main GitLab repository\n\n([gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab)), backup times\nwere\n\naround 48 hours. Investigation revealed there was a performance bottleneck\ndue\n\nto how Git was performing a check to avoid duplicated references being\nincluded\n\nin the bundle. The implementation used a nested `for` loop to iterate and\n\ncompare all listed references, leading to O(N^2) time complexity. This\nscales\n\nvery poorly as the number of references in a repository increases.\n\n\nIn this release, this issue was addressed by replacing the nested loops with\na\n\nmap data structure leading to a significant speedup. The following benchmark\n\nthe performance improvement for creating a bundle with a repository\ncontaining\n\n100,000 references:\n\n\n```text\n\nBenchmark 1: bundle (refcount = 100000, revision = master)\n  Time (mean ± σ):     14.653 s ±  0.203 s    [User: 13.940 s, System: 0.762 s]\n  Range (min … max):   14.237 s … 14.920 s    10 runs\n\nBenchmark 2: bundle (refcount = 100000, revision = HEAD)\n  Time (mean ± σ):      2.394 s ±  0.023 s    [User: 1.684 s, System: 0.798 s]\n  Range (min … max):    2.364 s …  2.425 s    10 runs\n\nSummary\n  bundle (refcount = 100000, revision = HEAD) ran\n    6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)\n```\n\n\nTo learn more, check out our blog post\n\n[How we decreased GitLab repo backup times from 48 hours to 41\nminutes](https://about.gitlab.com/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes/).\n\nYou can also find the original mailing list thread\n\n[here](https://lore.kernel.org/git/20250401-488-generating-bundles-with-many-references-has-non-linear-performance-v1-0-6d23b2d96557@gmail.com/).\n\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n\n## Better bundle URI unbundling\n\n\nThrough the [bundle URI](https://git-scm.com/docs/bundle-uri) mechanism in\nGit,\n\nlocations to fetch bundles from can be provided to clients with the goal to\n\nhelp speed up clones and fetches. When a client downloads a bundle,\nreferences\n\nunder `refs/heads/*` are copied from the bundle into the repository along\nwith\n\ntheir accompanying objects. 
A bundle might contain additional references\n\noutside of `refs/heads/*` such as `refs/tags/*`, which are simply ignored\nwhen\n\nusing bundle URI on clone.\n\n\nIn Git 2.50, this restriction is lifted, and all references\n\nmatching `refs/*` contained in the downloaded bundle are copied.\n\n[Scott Chacon](https://github.com/schacon), who contributed this\nfunctionality,\n\ndemonstrates the difference when cloning\n\n[gitlab-org/gitlab-foss](https://gitlab.com/gitlab-org/gitlab-foss):\n\n\n```shell\n\n$ git-v2.49 clone --bundle-uri=gitlab-base.bundle\nhttps://gitlab.com/gitlab-org/gitlab-foss.git gl-2.49\n\nCloning into 'gl2.49'...\n\nremote: Enumerating objects: 1092703, done.\n\nremote: Counting objects: 100% (973405/973405), done.\n\nremote: Compressing objects: 100% (385827/385827), done.\n\nremote: Total 959773 (delta 710976), reused 766809 (delta 554276),\npack-reused 0 (from 0)\n\nReceiving objects: 100% (959773/959773), 366.94 MiB | 20.87 MiB/s, done.\n\nResolving deltas: 100% (710976/710976), completed with 9081 local objects.\n\nChecking objects: 100% (4194304/4194304), done.\n\nChecking connectivity: 959668, done.\n\nUpdating files: 100% (59972/59972), done.\n\n\n$ git-v2.50 clone --bundle-uri=gitlab-base.bundle\nhttps://gitlab.com/gitlab-org/gitlab-foss.git gl-2.50\n\nCloning into 'gl-2.50'...\n\nremote: Enumerating objects: 65538, done.\n\nremote: Counting objects: 100% (56054/56054), done.\n\nremote: Compressing objects: 100% (28950/28950), done.\n\nremote: Total 43877 (delta 27401), reused 25170 (delta 13546), pack-reused 0\n(from 0)\n\nReceiving objects: 100% (43877/43877), 40.42 MiB | 22.27 MiB/s, done.\n\nResolving deltas: 100% (27401/27401), completed with 8564 local objects.\n\nUpdating files: 100% (59972/59972), done.\n\n```\n\n\nComparing these results, we see that Git 2.50 fetches 43,887 objects\n\n(40.42 MiB) after the bundle was extracted whereas Git 2.49 fetches a\n\ntotal of 959,773 objects (366.94 MiB). Git 2.50 fetches roughly 95% fewer\n\nobjects and 90% less data, which benefits both the client and the server.\nThe\n\nserver needs to process a lot less data to the client and the client needs\nto\n\ndownload and extract less data. In the example provided by Scott this led to\na\n\nspeedup of 25%.\n\n\nTo learn more, check out the corresponding\n\n[mailing-list\nthread](https://lore.kernel.org/git/pull.1897.git.git.1740489585344.gitgitgadget@gmail.com/).\n\n\n_This patch series was contributed by [Scott\nChacon](https://github.com/schacon)._\n\n\n## Read more\n\n\nThis article highlighted just a few of the contributions made by GitLab and\n\nthe wider Git community for this latest release. You can learn about these\nfrom\n\nthe [official release\nannouncement](https://lore.kernel.org/git/xmqq1prj1umb.fsf@gitster.g/) of\nthe Git project. 
Also, check\n\nout our [previous Git release blog\nposts](https://about.gitlab.com/blog/tags/git/)\n\nto see other past highlights of contributions from GitLab team members.\n","2025-06-16",[1120,960,268],{"featured":91,"template":844,"slug":1146},"what-s-new-in-git-2-50-0",{"category":90,"slug":812,"posts":1148},[1149,1159,1167],{"content":1150,"config":1157},{"title":1151,"description":1152,"heroImage":1153,"date":880,"category":812,"tags":1154},"GitLab Patch Release: 18.5.1, 18.4.3, 18.3.5","Learn more about this release for GitLab Community Edition (CE) and Enterprise Edition (EE).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661926/Blog/Hero%20Images/security-patch-blog-image-r2-0506-700x400-fy25_2x.jpg",[1155,1156],"patch releases","security releases",{"featured":6,"template":844,"externalUrl":1158},"https://about.gitlab.com/releases/2025/10/22/patch-release-gitlab-18-5-1-released/",{"content":1160,"config":1165},{"title":1161,"description":1162,"heroImage":1163,"date":1040,"category":812,"tags":1164},"GitLab 18.5 released","Learn what's inside, including GitLab Duo Planner Agent (Beta), GitLab Duo Security Analyst Agent (Beta), Maven virtual registry UI (Beta), and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1760628860/ueqgsanpldmpxgzp1fre.png",[812],{"featured":91,"template":844,"externalUrl":1166},"https://about.gitlab.com/releases/2025/10/16/gitlab-18-5-released/",{"content":1168,"config":1176},{"title":1169,"description":1170,"authors":1171,"category":812,"tags":1172,"heroImage":1173,"date":1174,"body":1175},"Optimize GitLab object storage for scale and performance","Configure GitLab object storage for maximum performance and cost savings. Learn consolidated forms, direct downloads, and identity-based authentication.",[940],[812,857,856],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665151/Blog/Hero%20Images/blog-image-template-1800x945__27_.png","2025-10-08","Managing GitLab at scale requires strategic object storage configuration.\n\nHere's how to configure object storage for maximum performance, security,\n\nand reliability across your GitLab components.\n\n## Use consolidated form for GitLab components\n\nFor artifacts, LFS, uploads, packages, and other GitLab data, eliminate credential duplication with the consolidated form:\n\n```\ngitlab_rails['object_store']['enabled'] = true\n\ngitlab_rails['object_store']['connection'] = {\n  'provider' => 'AWS',\n  'region' => 'us-east-1',\n  'use_iam_profile' => true\n}\n\ngitlab_rails['object_store']['objects']['artifacts']['bucket'] = 'gitlab-artifacts'\n\ngitlab_rails['object_store']['objects']['lfs']['bucket'] = 'gitlab-lfs'\n\n# ... additional buckets for each object type\n\n```\nThis reduces complexity while enabling encrypted S3 buckets and proper Content-MD5 headers.\n\n## Configure container registry separately\n\nThe container registry requires its own configuration since it doesn't support the consolidated form:\n\n```\nregistry['storage'] = {\n  's3_v2' => {  # Use the new v2 driver\n    'bucket' => 'gitlab-registry',\n    'region' => 'us-east-1',\n    # Omit access keys to use IAM roles\n  }\n}\n\n```\n\n\n**Note:** The s3_v1 driver is deprecated and will be removed in GitLab 19.0. 
Migrate to s3_v2 for better performance and reliability.\n\n\n## Disable proxy download for performance\n\n\nSet `proxy_download` to **false** (default) for direct downloads:\n\n```\n# For GitLab objects - can be set globally\n\ngitlab_rails['object_store']['proxy_download'] = false\n\n# Or configure per bucket for granular control\n\ngitlab_rails['object_store']['objects']['artifacts']['proxy_download'] = false\n\ngitlab_rails['object_store']['objects']['lfs']['proxy_download'] = false\n\ngitlab_rails['object_store']['objects']['uploads']['proxy_download'] = true  # Example: keep proxy for uploads\n\n# Container registry defaults to redirect mode (direct downloads)\n\n# Only disable if your environment requires it:\n\nregistry['storage']['redirect']['disable'] = false  # Keep as false\n\n```\n\n**Important:** The `proxy_download` option can be configured globally at the object-store level or individually per bucket. This gives you flexibility to optimize based on your specific use case — for example, you might want direct downloads for large artifacts and LFS files, but proxy smaller uploads through GitLab for additional security controls.\n\nThis dramatically reduces server load and egress costs by letting clients download directly from object storage.\n\n## Choose identity-based authentication\n\n**AWS:** Use IAM roles instead of access keys:\n\n```\n# GitLab objects\n\ngitlab_rails['object_store']['connection'] = {\n  'provider' => 'AWS',\n  'use_iam_profile' => true\n}\n\n# Container registry\n\nregistry['storage'] = {\n  's3_v2' => {\n    'bucket' => 'gitlab-registry',\n    'region' => 'us-east-1'\n    # No access keys = IAM role authentication\n  }\n}\n\n```\n\n\n**Google Cloud Platform:** Enable application default credentials:\n\n```\n\ngitlab_rails['object_store']['connection'] = {\n  'provider' => 'Google',\n  'google_application_default' => true\n}\n\n```\n\n**Azure:** Use workload identities by omitting storage access keys.\n\n## Add encryption layers\n\nEnable server-side encryption for additional security:\n\n```\n# GitLab objects\n\ngitlab_rails['object_store']['storage_options'] = {\n  'server_side_encryption' => 'AES256'\n}\n\n# Container registry\n\nregistry['storage'] = {\n  's3_v2' => {\n    'bucket' => 'gitlab-registry',\n    'encrypt' => true\n  }\n}\n\n```\n\nFor AWS KMS encryption, specify the key ARN in `server_side_encryption_kms_key_id`.\n\n## Use separate buckets for organization\n\nCreate dedicated buckets for each component:\n\n* **gitlab-artifacts** - CI/CD job artifacts\n\n* **gitlab-lfs** - Git LFS objects\n\n* **gitlab-uploads** - User uploads\n\n* **gitlab-packages** - Package registry\n\n* **gitlab-registry** - Container images\n\nThis isolation improves security, enables granular access controls, and simplifies cost tracking.\n\n## Key configuration differences\n\n| Component | Consolidated Form | Identity Auth | Encryption | Direct Downloads |\n| --- | --- | --- | --- | ---|\n| Artifacts, LFS, Packages | ✅ Supported | ✅ use_iam_profile | ✅ storage_options | ✅ proxy_download: false |\n| Container Registry | ❌ Separate config | ✅ Omit access keys | ✅ encrypt: true | ✅ redirect enabled by default |\n\n## Migration path\n\n1. **Start with GitLab objects:** Use the consolidated form for immediate complexity reduction.\n\n2. **Configure registry separately:** Use s3_v2 driver with IAM authentication.\n\n3. **Enable encryption:** Add server-side encryption for both components.\n\n4. 
**Optimize performance:** Ensure direct downloads are enabled with appropriate `proxy_download` settings.\n\n5. **Set up lifecycle policies:** Configure S3 lifecycle rules to clean up incomplete multipart uploads.\n\n\n## Additional resources\n\n\nFor a complete AWS S3 configuration example, see the [GitLab documentation on AWS S3 object storage setup](https://docs.gitlab.com/administration/object_storage/#aws-s3).\n\n\nFor more details on configuring proxy_download parameters per bucket, refer to the [GitLab object storage configuration documentation](https://docs.gitlab.com/administration/object_storage/#configure-the-parameters-of-each-object).\n\n\n*These configurations will scale with your growth while maintaining security and performance. The separation between GitLab object storage and container registry configurations reflects their different underlying architectures, but both benefit from the same optimization principles.*\n",{"featured":6,"template":844,"slug":1177},"optimize-gitlab-object-storage-for-scale-and-performance",{"category":125,"slug":822,"posts":1179},[1180,1191,1199],{"content":1181,"config":1189},{"title":1182,"description":1183,"authors":1184,"heroImage":1186,"date":893,"body":1187,"category":822,"tags":1188},"Delivering faster and smarter scans with Advanced SAST","New accuracy and speed enhancements improve the developer experience and drive adoption. ",[1185],"Salman Ladha","https://res.cloudinary.com/about-gitlab-com/image/upload/v1759320418/xjmqcozxzt4frx0hori3.png","Static application security testing (SAST) is critical to building secure software, helping teams identify vulnerabilities in code before they can be exploited. Last year, with GitLab 17.4, we [launched Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/) to deliver higher-quality scan results directly in developer workflows. Since then, Advanced SAST has powered millions of scans across over a hundred thousand codebases, reducing risk and helping customers build more secure applications from the start.\n\nWe’re building on that foundation with a set of performance enhancements designed to improve accuracy and speed, so developers get results they can trust, without losing their flow. [New capabilities](https://about.gitlab.com/blog/gitlab-18-5-intelligence-that-moves-software-development-forward/) include better out-of-the-box precision, the ability to add custom detection rules, and a trio of improvements to accelerate scan times through multi-core scanning, algorithmic optimizations, and diff-based scanning. Together, these improvements make [Advanced SAST](https://docs.gitlab.com/user/application_security/sast/gitlab_advanced_sast/) smarter and faster, delivering security that’s developer-friendly by design.\n\n## SAST adoption hinges on both accuracy and speed  \n\nMost SAST programs rarely fail due to inaccurate vulnerability detection; they fail because developers don’t adopt security tooling. Too often, AppSec solutions like SAST deliver accuracy at the expense of the developer experience, or developer experience at the expense of accuracy. In reality, both are necessary. Without accuracy, developers don’t trust the results; without speed and usability, adoption lags. \n\nWhen both come together, security fits naturally into the development process — and that’s the only way security teams successfully drive SAST adoption at scale. 
This philosophy guides the GitLab roadmap for Advanced SAST.\n\n## Add custom detection rules for greater accuracy \n\nThe built-in Advanced SAST rules are informed by our in-house security research team, designed to maximize accuracy out of the box. Until now, you could [disable rules](https://docs.gitlab.com/user/application_security/sast/customize_rulesets/) or adjust their name, description, or severity, but you couldn’t add new detection logic. With GitLab 18.5, teams can now define their own custom, pattern-based rules to catch organization-specific issues — like flagging banned function calls — while still using GitLab’s curated ruleset as the baseline. Any violations of custom rules are reported in the same place as built-in GitLab rules, so developers can glean information from a single dashboard.\n\nCustom rules are effective at catching straightforward issues that matter to your organization, but they don’t influence the taint analysis that Advanced SAST uses to catch injections and similar flaws. Customizations are managed through simple TOML files, just like other SAST ruleset configurations. The result is higher-quality scan results tuned to your context, giving security teams more control and developers clearer, more actionable findings.\n\n## Faster scans to get developers in the flow \n\nSpeed matters. If a SAST scan takes too long, developers often switch to another task, so adoption suffers. \n\nThat’s why we’ve invested in several performance-based enhancements to dramatically reduce scan times without compromising on accuracy, including:  \n\n* **Multi-core scanning**: Leverages multiple CPU cores on GitLab Runners   \n* **Diff-based scanning**: Scans only the changed code in a merge request   \n* **Ongoing optimizations**: Smarter algorithms and engine enhancements \n\nThese improvements build on each other, delivering faster scans with significant impact:\n\n* Multi-core scanning typically reduces scan runtime by up to **50%.**  \n* Diff-based scanning helps the most in large repositories, where less code is modified in each change. It’s specifically designed to give faster feedback in the code review process by delivering faster scans in merge requests. In our testing, many large repositories now take less than **10 minutes to return results in MRs, where previously scans took more than 20 minutes.**  \n* In recent internal testing, algorithmic optimizations **cut scan times by up to 71%** on large open-source codebases, with Apache Lucene (Java) showing the biggest improvement. Other projects, including Django (Python), Kafka, and Zulip, also saw **performance boosts of over 50% in single-core mode**. You can see the results for yourself below. \n\nFor developers, these improvements mean quicker feedback in merge requests, less waiting on security results, and a smoother path to adoption. And with multi-core scanning and diff-based analysis layered on top, the gains will be even greater.\n\n![chart showing Python scan times](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760714805/rxl2zzo58j7y0k2ldxeq.png)\n\u003Cp>\u003C/p>\n\n![chart showing Java scan times](https://res.cloudinary.com/about-gitlab-com/image/upload/v1760714805/hz9bsrir6nrqthkjddvi.png)\n\n\u003Cp>\u003C/p>\n\n> These performance gains reflect GitLab’s broader focus on improving the developer experience across our platform. 
For example, one of our customers recently transitioned to GitLab’s [Pipeline Execution Policies](https://docs.gitlab.com/user/application_security/policies/pipeline_execution_policies/) (PEP) to gain greater control and flexibility over how security scans run within their pipelines. By standardizing templates, adding caching, and optimizing pipeline logic, their teams cut dependency scan runtimes from **15–60 minutes down to just 1–2 minutes per job — saving roughly 100,000 compute minutes every day across 15,000 scans**. It’s a clear example of how more customizable and efficient pipeline execution policies lead to faster feedback loops, higher productivity, and broader adoption.\n\nWith these latest enhancements, Advanced SAST gives security and development teams the accuracy, speed, and flexibility they need to keep up with modern software development. By reducing false positives, enabling custom detection, and accelerating scan times, we’re making security an enabler — not a blocker — for developers.\n\nLike all of [GitLab’s application security capabilities](https://about.gitlab.com/solutions/application-security-testing/), Advanced SAST is built directly into our DevSecOps platform, making security a natural part of how developers build, test, deploy, and secure software. \n\nThe result: faster adoption, fewer bottlenecks, and more secure applications delivered from the start.\n\n> Get started with Advanced SAST today! Sign up for a [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## Learn more\n\n- [GitLab Advanced SAST is now generally available](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/)\n- [A comprehensive guide to GitLab DAST](https://about.gitlab.com/blog/comprehensive-guide-to-gitlab-dast/)\n- [GitLab Security Testing solutions](https://about.gitlab.com/solutions/application-security-testing/)",[822,812],{"featured":6,"template":844,"slug":1190},"delivering-faster-and-smarter-scans-with-advanced-sast",{"content":1192,"config":1197},{"title":1193,"description":1194,"heroImage":1153,"date":1174,"category":822,"tags":1195},"GitLab Patch Release: 18.4.2, 18.3.4, 18.2.8","Learn more about GitLab Patch Release: 18.4.2, 18.3.4, 18.2.8 for GitLab Community Edition (CE) and Enterprise Edition (EE).",[1196],"releases",{"featured":6,"template":844,"externalUrl":1198},"https://about.gitlab.com/releases/2025/10/08/patch-release-gitlab-18-4-2-released/",{"config":1200,"content":1202},{"slug":1201,"featured":91,"template":844},"comprehensive-guide-to-gitlab-dast",{"title":1203,"description":1204,"authors":1205,"heroImage":1186,"date":1079,"updatedDate":1207,"category":822,"tags":1208,"body":1209},"A comprehensive guide to GitLab DAST","DevSecOps teams can learn how to implement and configure dynamic application security testing, perform passive/active scans, and set security policies.",[1206],"Fernando Diaz","2025-10-01",[822,857,1067],"Modern businesses entirely depend on web-based platforms for customer interactions, financial\ntransactions, data processing, and core business operations. As digital transformation\naccelerates and remote or hybrid work becomes the norm, the attack surface for web applications has\nexpanded dramatically, making them prime targets for cybercriminals. 
Therefore, securing web applications has become more critical than ever.\n\nWhile static code analysis catches vulnerabilities in source code, it cannot identify\nruntime security issues that emerge when applications interact with real-world\nenvironments, third-party services, and complex user workflows. This is where Dynamic\nApplication Security Testing ([DAST](https://docs.gitlab.com/user/application_security/dast/)) becomes invaluable. GitLab's integrated DAST solution provides teams with automated security testing capabilities directly within their CI/CD pipelines, on a schedule, or on-demand, enabling continuous security validation\nwithout disrupting development workflows.\n\n## Why DAST?\n\nDAST should be implemented because it provides critical runtime security validation by testing applications\nin their actual operating environment, identifying vulnerabilities that static analysis cannot detect.\nAdditionally, GitLab DAST can be seamlessly integrated into shift-left security workflows, and\ncan enhance compliance assurance along with risk management.\n\n### Runtime vulnerability detection\n\nDAST excels at identifying security vulnerabilities that only manifest when applications are running.\nUnlike static analysis tools that examine code at rest, DAST scanners interact with live applications\nas an external attacker would, uncovering issues such as:\n\n- **Authentication and session management flaws** that could allow unauthorized access\n- **Input validation vulnerabilities,** including SQL injection, cross-site scripting (XSS), and command injection\n- **Configuration weaknesses** in web servers, databases, and application frameworks\n- **Business logic flaws** that emerge from complex user interactions\n- **API security issues,** including improper authentication, authorization, and data exposure\n\nDAST complements other security testing approaches to provide comprehensive application security coverage. When combined with Static Application Security Testing ([SAST](https://docs.gitlab.com/user/application_security/sast/)), Software Composition Analysis ([SCA](https://docs.gitlab.com/user/application_security/dependency_scanning/)), manual\npenetration testing, and [many other scanner types](https://about.gitlab.com/solutions/application-security-testing/), DAST fills critical gaps in security validation:\n\n- **Black-box testing perspective** that mimics real-world attack scenarios\n- **Environment-specific testing** that validates security in actual deployment configurations\n- **Third-party component testing,** including APIs, libraries, and external services\n- **Configuration validation** across the entire application stack\n\n### Seamless shift-left security integration\n\nGitLab DAST seamlessly integrates into existing CI/CD pipelines, enabling teams to identify security\nissues early in the development lifecycle. This shift-left approach provides several key benefits:\n\n- **Cost reduction** — Fixing vulnerabilities during development is significantly less expensive than addressing them in production. 
Studies show that remediation costs can be 10 to 100 times higher in production environments.\n- **Faster time-to-market** — Automated security testing eliminates bottlenecks caused by manual security reviews, allowing teams to maintain rapid deployment schedules while ensuring security standards.\n- **Developer empowerment** — By providing immediate feedback on security issues, DAST helps developers build security awareness and improve their coding practices over time.\n\n### Compliance and risk management\n\nMany regulatory frameworks and industry standards require regular security testing of web applications.\nDAST helps organizations meet compliance requirements for standards such as:\n\n- **PCI DSS** for applications handling payment card data\n- **SOC 2** security controls for service organizations\n- **ISO 27001** information security management requirements\n\nThe automated nature of GitLab DAST ensures consistent, repeatable security testing that auditors can\nrely on, while detailed reporting provides the documentation needed for compliance validation.\n\n## Implementing DAST\n\nBefore implementing GitLab DAST, ensure your environment meets the following requirements:\n\n- **GitLab version and Ultimate subscription** — DAST is available in [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) and requires GitLab 13.4 or later for full functionality; however, the [latest version](https://about.gitlab.com/releases/categories/releases/) is recommended.\n- **Application accessibility** — Your application must be accessible via HTTP/HTTPS with a publicly reachable URL or accessible within your GitLab Runner's network.\n- **Authentication setup** — If your application requires authentication, prepare test credentials or configure authentication bypass mechanisms for security testing.\n\n### Basic implementation\n\nThe simplest way to add DAST to your pipeline is by including the DAST template in your [`.gitlab-ci.yml`](https://docs.gitlab.com/ci/#step-1-create-a-gitlab-ciyml-file) file\nand providing a website to scan:\n\n```yaml\ninclude:\n  - template: DAST.gitlab-ci.yml\n\nvariables:\n  DAST_WEBSITE: \"https://your-application.example.com\"\n```\n\nThis basic configuration will:\n- Run a DAST scan against your specified website\n- Generate a security report in GitLab's security dashboard\n- Fail the pipeline if high-severity vulnerabilities are detected\n- Store scan results as pipeline artifacts\n\nHowever, to gain the full benefit of [CI/CD](https://about.gitlab.com/topics/ci-cd/), it is recommended that you first deploy the application\nand set DAST to run only after the application has been deployed. 
The application URL can be\ndynamically created and the DAST job can be configured fully with [GitLab Job syntax](https://docs.gitlab.com/ci/yaml/).\n\n```yaml\nstages:\n  - build\n  - deploy\n  - dast\n\ninclude:\n  - template: Security/DAST.gitlab-ci.yml\n\n# Builds and pushes application to GitLab's built-in container registry\nbuild:\n  stage: build\n  variables:\n    IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $IMAGE .\n    - docker push $IMAGE\n\n# Deploys the application to your chosen target, sets up the DAST site dynamically, and requires the build to complete\ndeploy:\n  stage: deploy\n  script:\n    - echo \"DAST_WEBSITE=http://your-application.example.com\" >> deploy.env\n    - echo \"Perform deployment here\"\n  environment:\n    name: $DEPLOY_NAME\n    url: http://your-application.example.com\n  artifacts:\n    reports:\n      dotenv: deploy.env\n  dependencies:\n    - build\n\n# Configures DAST to run an active scan on non-default branches and a passive scan on the default branch; requires the deployment to complete before it runs\ndast:\n  stage: dast\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      variables:\n        DAST_FULL_SCAN: \"false\"\n    - if: $CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH\n      variables:\n        DAST_FULL_SCAN: \"true\"\n  dependencies:\n    - deploy\n```\n\nYou can see a working example in the [Tanuki Shop](https://gitlab.com/gitlab-da/tutorials/security-and-governance/tanuki-shop) demo application, which generates the\nfollowing pipeline:\n\n![Standard DAST Pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118303/rr3cyxjwyecxbmrdxon6.png)\n\n### Understanding passive vs. active scans\n\nIn the example above, we enabled active scanning for non-default branches:\n\n```yaml\n- if: $CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH\n  variables:\n    DAST_FULL_SCAN: \"true\"\n```\n\nGitLab DAST employs two distinct scanning methodologies (passive and active), each serving\ndifferent security testing needs.\n\n**Passive scans** analyze application responses without sending potentially harmful requests. This approach:\n\n- Examines HTTP headers, cookies, and response content for security misconfigurations\n- Identifies information disclosure vulnerabilities like exposed server versions or stack traces\n- Detects missing security headers (CSP, HSTS, X-Frame-Options)\n- Analyzes SSL/TLS configuration and certificate issues\n\n**Active scans** send crafted requests designed to trigger vulnerabilities. This approach:\n\n- Tests for injection vulnerabilities (SQL injection, XSS, command injection)\n- Attempts to exploit authentication and authorization flaws\n- Validates input sanitization and output encoding\n- Tests for business logic vulnerabilities\n\n**Note:** The DAST scanner is set to passive by default.\n\nDAST has several configuration options that can be applied via environment variables.\nFor a list of all the possible configuration options for DAST, see the [DAST documentation](https://docs.gitlab.com/user/application_security/dast/browser/configuration/customize_settings/).\n\n### Authentication configuration\n\nDAST requires authentication configuration in CI/CD jobs to achieve complete security coverage. Authentication enables DAST to simulate real attacks and test user-specific features only accessible after login.
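In practice, the login credentials themselves are usually supplied as masked CI/CD variables (for example, under **Settings > CI/CD > Variables**) rather than committed to `.gitlab-ci.yml`. A minimal sketch, assuming `DAST_AUTH_USERNAME` and `DAST_AUTH_PASSWORD` have been created as masked project variables; the login page URL is illustrative:\n\n```yaml\n# Sketch only: secrets stay out of the repository.\n# DAST_AUTH_USERNAME and DAST_AUTH_PASSWORD are defined as masked CI/CD\n# variables in the project settings and are picked up by the DAST job.\ndast:\n  variables:\n    DAST_AUTH_URL: \"https://your-application.example.com/login\"\n```\n\n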
The DAST job typically authenticates by submitting login forms in a browser, then verifies success before continuing to crawl the application with saved credentials. Failed authentication stops the job.\n\nSupported authentication methods:\n- Single-step login form\n- Multi-step login form\n- Authentication to URLs outside the target scope\n\nHere is an example for a single-step login form in a [Tanuki Shop MR](https://gitlab.com/gitlab-da/tutorials/security-and-governance/tanuki-shop/-/merge_requests/20) which adds\nadmin authentication to non-default branches.\n\n```yaml\ndast:\n  stage: dast\n  before_script:\n    - echo \"DAST_TARGET_URL set to '$DAST_TARGET_URL'\" # Dynamically loaded from deploy job\n    - echo \"DAST_AUTH_URL set to '$DAST_TARGET_URL'\" # Dynamically loaded from deploy jobs\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      variables:\n        DAST_FULL_SCAN: \"false\"\n    - if: $CI_COMMIT_REF_NAME != $CI_DEFAULT_BRANCH\n      variables:\n        DAST_FULL_SCAN: \"true\" # run both passive and active checks\n        DAST_AUTH_USERNAME: \"admin@tanuki.local\" # The username to authenticate to in the website\n        DAST_AUTH_PASSWORD: \"admin123\" # The password to authenticate to in the website\n        DAST_AUTH_USERNAME_FIELD: \"css:input[id=email]\" # A selector describing the element used to enter the username on the login form\n        DAST_AUTH_PASSWORD_FIELD: \"css:input[id=password]\" # A selector describing the element used to enter the password on the login form\n        DAST_AUTH_SUBMIT_FIELD: \"css:button[id=loginButton]\" # A selector describing the element clicked on to submit the login form\n        DAST_SCOPE_EXCLUDE_ELEMENTS: \"css:[id=navbarLogoutButton]\" # Comma-separated list of selectors that are ignored when scanning\n        DAST_AUTH_REPORT: \"true\" # generate a report detailing steps taken during the authentication process\n        DAST_REQUEST_COOKIES: \"welcomebanner_status:dismiss,cookieconsent_status:dismiss\" # A cookie name and value to be added to every request\n        DAST_CRAWL_GRAPH: \"true\" # generate an SVG graph of navigation paths visited during crawl phase of the scan\n  dependencies:\n    - deploy-kubernetes\n```\n\nYou can see if the authentication was successful by viewing the job logs:\n\n![Auth logs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118293/zdxgwb6jmseyzwcjscrz.png)\n\nOnce this job completes it provides an authentication report which includes screenshots of the login page:\n\n![Auth report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118292/idm62deg3ezeehcubmc1.png)\n\nYou can also see more examples on DAST with authentication in our [DAST demos](https://gitlab.com/gitlab-org/security-products/demos/dast/) group.\nTo learn more about how to perform DAST with authentication with your specific requirements, see the [DAST authentication documentation](https://docs.gitlab.com/user/application_security/dast/browser/configuration/authentication/).\n\nWatch this video demonstration of GitLab DAST authentication configuration:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/q_oAgEYILc8?si=b_kll6G7MxssQE8j\" allowfullscreen=\"true\" title=\"GitLab DAST Tutorial Video\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Viewing results in MR\n\nGitLab's DAST seamlessly integrates security scanning into your development workflow\nby displaying results directly within merge 
requests:\n\n![DAST MR 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118293/rrx4n3pgxi9vmzlas8vp.png)  \n![DAST MR 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118294/rh9vwv6ohoaenpvicujm.png)  \n![DAST MR 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118294/ficelmulsc0r7bijf24m.png)\n\nThese results include comprehensive vulnerability data within MRs to help developers identify and address\nsecurity issues before code is merged. Here's what DAST typically reports:\n\n### Vulnerability details\n* Vulnerability name and type (e.g., SQL injection, XSS, CSRF)\n* Severity level (Critical, High, Medium, Low, Info)\n* CVSS score when applicable\n* Common Weakness Enumeration (CWE) identifier\n* Confidence level of the finding\n\n### Location information\n* URL/endpoint where the vulnerability was detected\n* HTTP method used (GET, POST, etc.)\n* Request/response details showing the vulnerable interaction\n* Parameter names that are vulnerable\n* Evidence demonstrating the vulnerability\n\n### Technical context\n* Description of the vulnerability and potential impact\n* Proof of concept showing how the vulnerability can be exploited\n* Request/response pairs that triggered the finding\n* Scanner details (which DAST tool detected it)\n\n### Remediation guidance\n* Solution recommendations for fixing the vulnerability\n* References to security standards (OWASP, etc.)\n* Links to documentation for remediation steps\n\n## Viewing results in GitLab Vulnerability Report\n\nFor managing vulnerabilities located in the default (or production) branch, the GitLab Vulnerability Report provides a centralized dashboard for monitoring all security findings across your entire project or organization. This comprehensive view aggregates all security scan results, offering filtering and sorting capabilities to help security teams prioritize remediation efforts.\n\n![Vulnerability Report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118304/o8jjgngtxqplcgux9h5p.png)\n\nWhen selecting a vulnerability, you are taken to its vulnerability page:\n\n![Vulnerability Page 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118303/rolcgxhe0lh2s54zz2kc.png)  \n![Vulnerability Page 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118303/dubic3yacd5n11ine1vi.png)  \n![Vulnerability Page 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118303/iojrm3zasqxljuybbqcs.png)\n\nJust like in merge requests, the vulnerability page provides comprehensive vulnerability data, as seen above. From here you can triage vulnerabilities by assigning them a status:\n\n* Needs triage (Default)\n* Confirmed\n* Dismissed (Acceptable risk, False positive, Mitigating control, Used in tests, Not applicable)\n* Resolved\n\nWhen a vulnerability status is changed, the audit log includes a note of who changed it, when it was changed, and the reason it was changed. This comprehensive system allows security teams to efficiently prioritize, track, and manage vulnerabilities throughout their lifecycle with clear accountability and detailed risk context.\n\n## On-demand and scheduled DAST\n\nGitLab provides flexible scanning options beyond standard CI/CD pipeline integration through\non-demand and scheduled DAST scans. 
On-demand scans allow security teams and developers to\ninitiate DAST testing manually whenever needed, without waiting for code commits or pipeline triggers.\nThis capability is particularly valuable for ad-hoc security assessments, incident response scenarios,\nor when testing specific application features that may not be covered in regular pipeline scans.\n\n![On-demand 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118296/hs3fhn42ceycmd94oaua.png)  \n![On-demand 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118298/wiptmr948xey6rrodosg.png)\n\nOn-demand scans can be configured with custom parameters, target URLs, and scanning profiles, making\nthem ideal for focused security testing of particular application components or newly-deployed features.\nScheduled DAST scans provide automated, time-based security testing that operates independently of\nthe development workflow. These scans can be configured to run daily, weekly, or at custom intervals,\nensuring continuous security monitoring of production applications.\n\n![Scheduling DAST](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118300/dbxgkeahij4fklkpcpck.png)\n\nTo learn how to implement on-demand or scheduled scans within your project, see the\n[DAST on-demand scan documentation](https://docs.gitlab.com/user/application_security/dast/on-demand_scan/)\n\n## DAST in compliance workflows\n\nGitLab's security policies framework allows organizations to enforce consistent security\nstandards across all projects, while maintaining flexibility for different teams and environments.\nSecurity policies enable centralized governance of DAST scanning requirements, ensuring that\ncritical applications receive appropriate security testing without requiring individual project\nconfiguration. By defining security policies at the group or instance level, security teams can\nmandate DAST scans for specific project types, deployment environments, or risk classifications.\n\n**Scan/Pipeline Execution Policies** can be configured to automatically trigger DAST scans based on\nspecific conditions such as merge requests to protected branches, scheduled intervals, or deployment events.\nFor example, a policy might require full active DAST scans for all applications before production deployment,\nwhile allowing passive scans only for development branches. These policies can include custom variables,\nauthentication configurations, and exclusion rules that are automatically applied to all covered projects,\nreducing the burden on development teams and ensuring security compliance.\n\n![Scan Execution Policy](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118299/twe0967sayasvassimf3.png)\n\n**Merge Request Approval Policies** provide an additional layer of security governance by enforcing human\nreview for code changes that may impact security. These policies can be configured to require security team\napproval when DAST scans detect new vulnerabilities, when security findings exceed defined thresholds, or\nwhen changes affect security-critical components. 
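As a rough sketch, a merge request approval policy along these lines could look like the following YAML (the policy name, approver username, and thresholds are illustrative; see the policy documentation linked below for the full schema):\n\n```yaml\napproval_policy:\n  - name: Require security review for new DAST findings\n    enabled: true\n    rules:\n      - type: scan_finding\n        branch_type: protected\n        scanners:\n          - dast\n        vulnerabilities_allowed: 0\n        severity_levels:\n          - critical\n          - high\n        vulnerability_states:\n          - newly_detected\n    actions:\n      - type: require_approval\n        approvals_required: 1\n        user_approvers:\n          - security-engineer  # illustrative username\n```\n\n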
For example, a policy might automatically require approval\nfrom a designated security engineer when DAST findings include high-severity vulnerabilities, while allowing\nlower-risk findings to proceed with standard code review processes.\n\n![MR Approval Policy](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118295/w0odyhf3gnkxis3f61ma.png)\n\nTo learn more about GitLab security policies, see the [policy documentation](https://docs.gitlab.com/user/application_security/policies/).\nAdditionally, for compliance, GitLab provides [Security Inventory](https://docs.gitlab.com/user/application_security/security_inventory/)\nand [Compliance center](https://docs.gitlab.com/user/compliance/compliance_center/), which can allow you to oversee\nif DAST is running in your environment and where it is required.\n\n![Security Inventory](https://res.cloudinary.com/about-gitlab-com/image/upload/v1758118300/hro6gykf7igpnnczmpyg.png)\n\nTo learn more about these features, visit our [software compliance solutions page](https://about.gitlab.com/solutions/software-compliance/).\n\n## Summary\n\nGitLab DAST represents a powerful solution for integrating dynamic security testing into modern development workflows. By implementing DAST in your CI/CD pipeline, your team gains the ability to automatically detect runtime vulnerabilities, maintain compliance with security standards, and build more secure applications without sacrificing development velocity.\n\nThe key to successful DAST implementation lies in starting with basic configuration and gradually expanding to more sophisticated scanning profiles as your security maturity grows. Begin with simple website scanning, then progressively add authentication, custom exclusions, and advanced reporting to match your specific security requirements.\n\nRemember that DAST is most effective when combined with other security testing approaches. Use it alongside static analysis, dependency scanning, and manual security reviews to create a comprehensive security testing strategy. The automated nature of GitLab DAST ensures that security testing becomes a consistent, repeatable part of your development process rather than an afterthought.\n\n> To learn more about GitLab security, check out our [security testing solutions page](https://about.gitlab.com/solutions/application-security-testing/). 
To get started with GitLab DAST, [sign up for a free trial of GitLab Ultimate today](https://about.gitlab.com/free-trial/devsecops/).\n",{"content":1211,"config":1214},{"heroImage":888,"title":889,"description":890,"authors":1212,"date":893,"body":894,"category":736,"tags":1213},[892],[856,812,542],{"featured":91,"template":844,"slug":897},[1216,1221,1225],{"content":1217,"config":1220},{"title":875,"description":876,"authors":1218,"heroImage":879,"date":880,"body":881,"category":736,"tags":1219},[878],[732,883,857,812,856],{"featured":6,"template":844,"slug":885},{"content":1222,"config":1224},{"title":1151,"description":1152,"heroImage":1153,"date":880,"category":812,"tags":1223},[1155,1156],{"featured":6,"template":844,"externalUrl":1158},{"content":1226,"config":1229},{"title":1182,"description":1183,"authors":1227,"heroImage":1186,"date":893,"body":1187,"category":822,"tags":1228},[1185],[822,812],{"featured":6,"template":844,"slug":1190},[1231,1236,1240],{"content":1232,"config":1235},{"title":1034,"description":1035,"authors":1233,"heroImage":1039,"date":1040,"body":1041,"category":780,"tags":1234},[1037,1038],[109,573,857],{"featured":91,"template":844,"slug":1044},{"content":1237,"config":1239},{"title":1161,"description":1162,"heroImage":1163,"date":1040,"category":812,"tags":1238},[812],{"featured":91,"template":844,"externalUrl":1166},{"content":1241,"config":1244},{"title":1047,"description":1048,"authors":1242,"heroImage":1051,"date":1052,"body":1053,"category":780,"tags":1243},[1050],[812,856,822,857],{"featured":6,"template":844,"slug":1056},1761249105695]