[{"data":1,"prerenderedAt":5609},["ShallowReactive",2],{"/en-us/blog/tags/ci-cd/":3,"navigation-en-us":20,"banner-en-us":449,"footer-en-us":466,"CI/CD-tag-page-en-us":676},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/ci-cd","tags",false,"",{"tag":9,"tagSlug":10},"CI/CD","ci-cd",{"template":12},"BlogTag","content:en-us:blog:tags:ci-cd.yml","yaml","Ci Cd","content","en-us/blog/tags/ci-cd.yml","en-us/blog/tags/ci-cd","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":445,"_type":14,"title":446,"_source":16,"_file":447,"_stem":448,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,186,191,297,357],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo 
ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":168},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,123,147],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,111,115,119],{"text":9,"config":109},{"href":110,"dataGaLocation":28,"dataGaName":9},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code Management",{"href":118,"dataGaLocation":28,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":28,"icon":130},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[132,137,142],{"text":133,"config":134},"Application Security Testing",{"href":135,"dataGaName":136,"dataGaLocation":28},"/solutions/application-security-testing/","Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":28,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":28,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":28,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":28,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":28,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":28,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":28,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":28,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all 
resources",{"href":198,"dataGaName":194,"dataGaLocation":28},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":28},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":28,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":28},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":28},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":28},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":28},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab 
Services",{"href":262,"dataGaName":263,"dataGaLocation":28},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":28},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":28},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":28},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":28},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":28},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":28},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust 
Center",{"href":340,"dataGaName":341,"dataGaLocation":28},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":28},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":28},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":35,"config":364},{"href":37,"dataGaName":365,"dataGaLocation":28},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":28},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":42,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":57,"config":389},{"href":62,"dataGaName":57,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":9,"config":395},{"href":110,"dataGaName":9,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why 
GitLab?",{"href":70,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":33,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":62,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":14,"_source":16,"_file":464,"_stem":465,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the 
Beta",{"href":455,"dataGaName":456,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":672,"_type":14,"title":673,"_source":16,"_file":674,"_stem":675,"_extension":19},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":664},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,515,571,600,634],{"title":46,"links":493,"subMenu":498},[494],{"text":495,"config":496},"DevSecOps platform",{"href":55,"dataGaName":497,"dataGaLocation":475},"devsecops platform",[499],{"title":187,"links":500},[501,505,510],{"text":502,"config":503},"View plans",{"href":189,"dataGaName":504,"dataGaLocation":475},"view plans",{"text":506,"config":507},"Why 
Premium?",{"href":508,"dataGaName":509,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":511,"config":512},"Why Ultimate?",{"href":513,"dataGaName":514,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",{"title":516,"links":517},"Solutions",[518,523,525,527,532,537,541,544,548,553,555,558,561,566],{"text":519,"config":520},"Digital transformation",{"href":521,"dataGaName":522,"dataGaLocation":475},"/topics/digital-transformation/","digital transformation",{"text":133,"config":524},{"href":135,"dataGaName":133,"dataGaLocation":475},{"text":122,"config":526},{"href":105,"dataGaName":106,"dataGaLocation":475},{"text":528,"config":529},"Agile development",{"href":530,"dataGaName":531,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":533,"config":534},"Cloud transformation",{"href":535,"dataGaName":536,"dataGaLocation":475},"/topics/cloud-native/","cloud transformation",{"text":538,"config":539},"SCM",{"href":118,"dataGaName":540,"dataGaLocation":475},"source code management",{"text":9,"config":542},{"href":110,"dataGaName":543,"dataGaLocation":475},"continuous integration & delivery",{"text":545,"config":546},"Value stream management",{"href":162,"dataGaName":547,"dataGaLocation":475},"value stream management",{"text":549,"config":550},"GitOps",{"href":551,"dataGaName":552,"dataGaLocation":475},"/solutions/gitops/","gitops",{"text":172,"config":554},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":556,"config":557},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":559,"config":560},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":562,"config":563},"Education",{"href":564,"dataGaName":565,"dataGaLocation":475},"/solutions/education/","education",{"text":567,"config":568},"Financial services",{"href":569,"dataGaName":570,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":572},[573,575,577,579,582,584,586,588,590,592,594,596,598],{"text":204,"config":574},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":576},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":578},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":580},{"href":221,"dataGaName":581,"dataGaLocation":475},"docs",{"text":242,"config":583},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":585},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":587},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":589},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":591},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":593},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":595},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":597},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":599},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":601},[602,604,606,608,610,612,614,618,623,625,627,629],{"text":305,"config":603},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":605},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":607},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":609},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":611},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":613},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":615,"config":616},"Sustainability",{"href":617,"dataGaName":615,"dataGaLocation":475},"/sustainability/",{"text":619,"config":620},"Diversity, inclusion and belonging (DIB)",{"href":621,"dataGaName":622,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":338,"config":624},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":626},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":628},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":630,"config":631},"Modern Slavery Transparency Statement",{"href":632,"dataGaName":633,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":635,"links":636},"Contact Us",[637,640,642,644,649,654,659],{"text":638,"config":639},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":475},{"text":367,"config":641},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":643},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":645,"config":646},"Status",{"href":647,"dataGaName":648,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":650,"config":651},"Terms of use",{"href":652,"dataGaName":653,"dataGaLocation":475},"/terms/","terms of use",{"text":655,"config":656},"Privacy statement",{"href":657,"dataGaName":658,"dataGaLocation":475},"/privacy/","privacy statement",{"text":660,"config":661},"Cookie preferences",{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":665},[666,668,670],{"text":650,"config":667},{"href":652,"dataGaName":653,"dataGaLocation":475},{"text":655,"config":669},{"href":657,"dataGaName":658,"dataGaLocation":475},{"text":660,"config":671},{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":677,"featuredPost":5587,"totalPagesCount":5607,"initialPosts":5608},[678,705,728,749,770,793,813,837,858,882,903,924,946,965,986,1006,1027,1049,1071,1091,1112,1134,1154,1174,1195,1215,1235,1255,1276,1296,1315,1334,1355,1374,1394,1415,1435,1455,1475,1496,1516,1537,1557,1577,1596,1614,1631,1650,1669,1687,1705,1725,1746,1765,1785,1805,1825,1845,1865,1885,1905,1926,1946,1966,1986,2005,2025,2045,2064,2083,2104,2124,2143,2162,2181,2202,2221,2242,2261,2280,2299,2319,2338,2359,2379,2397,2417,2436,2454,2473,2492,2511,2532,2550,2569,2588,2606,2626,2645,2665,2685,2708,2727,2746,2765,2786,2808,2828,2848,2868,2886,2904,2923,2943,2962,2982,3000,3021,3039,3059,3078,3098,3117,3136,3156,3176,3194,3214,3233,3254,3273,3294,3315,3334,3353,3372,3391,3412,3430,3450,3469,3489,3508,3526,3547,3567,3589,3608,3628,3647,3667,3686,3706,3725,3744,3763,3782,3801,3823,3842,3861,3879,3899,3917,3935,3954,3972,3992,4011,4030,4050,4070,4089,4108,4126,4145,4165,4184,4202,4222,4241,4259,4279,4299,4319,4338,4357,4375,4393,4413,4432,4452,4471,4489,4509,4528,4547,4567,4585,4605,4623,4643,4662,4682,4701,4720,4741,4760,4777,4795,4814,4832,4851,4869,4888,4907,4927,4946,4964,4983,5002,5022,5043,5062,5082,5100,5120,5137,5156,5175,5195,5217,5234,5254,5273,5292,5313,5334,5353,5373,5394,5413,5432,5450,5471,5490,5509,5527,5547,5566],{"_path":679,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":680,"content":688,"config":698,"_id":701,"_type":14,"title":702,"_source":16,"_file":703,"_stem":704,"_extension":19},"/en-us/blog/3-gitlab-features-to-level-up-devsecops-workflows",{"title":681,"description":682,"ogTitle":681,"ogDescription":682,"noIndex":6,"ogImage":683,"ogUrl":684,"ogSiteName":685,"ogType":686,"canonicalUrls":684,"schema":687},"3 GitLab features to level up DevSecOps workflows","Fix broken pipelines faster, better understand security vulnerabilities, and filter out false positives with our latest platform 
improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665762/Blog/Hero%20Images/blog-gl17-release-hero-17-0-93-1800x945-fy25__1_.png","https://about.gitlab.com/blog/3-gitlab-features-to-level-up-devsecops-workflows","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 GitLab features to level up DevSecOps workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Salman Ladha\"}],\n        \"datePublished\": \"2024-10-29\",\n      }",{"title":681,"description":682,"authors":689,"heroImage":683,"date":691,"body":692,"category":693,"tags":694},[690],"Salman Ladha","2024-10-29","Last month, we, along with the GitLab community, introduced more than 140 improvements to our AI-powered DevSecOps platform to help you build better and more secure software, faster. With that much product innovation, we know it can be difficult to keep track of the latest GitLab has to offer. So, each quarter, we’re spotlighting the most impactful capabilities to help you consolidate toolchains, boost development efficiency, and improve application security. Here are three new features [released in GitLab](https://about.gitlab.com/releases/categories/releases/) over the past few months that make an immediate impact on your software development.\n\n > Learn why GitLab was named a Leader in the [2024 Gartner® Magic Quadrant™ for DevOps Platforms](https://about.gitlab.com/blog/gitlab-named-a-leader-in-the-2024-gartner-magic-quadrant-for-devops/) and the [2024 Gartner® Magic Quadrant™ for AI Code Assistants](https://about.gitlab.com/blog/gitlab-named-a-leader-in-2024-gartner-magic-quadrant-for-ai-code-assistants/).\n\n## Root Cause Analysis: Diagnose broken pipelines faster\n\n[Developers spend less than a quarter of their time on code creation](https://about.gitlab.com/developer-survey/), according to our 2024 Global DevSecOps Survey. 
The bulk of their time is consumed by administrative tasks, planning, and troubleshooting — many of which can be accelerated with AI.\n\nFor example, diagnosing broken pipelines is a frustrating task for developers, which requires them to tediously scour through dense log files to identify the cause of the error. This often leads to trial-and-error fixes, sleuthing for solutions on Google, or asking a peer for support. This is a practical scenario where [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/) can meaningfully help developers.\n\nRoot Cause Analysis analyzes log files to uncover the core issue behind an error message in a CI/CD pipeline. Not only does it provide teams with insight into what caused the issue, but it also suggests a fix to help resolve the issue faster.\n\nWith less time spent on troubleshooting, developers can focus on building differentiated products to help their organizations win.\n\nGitLab Duo Root Cause Analysis is available as a [GitLab Duo Enterprise add-on](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=JZSgd7GTTk4y6mre\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Vulnerability Explanation: Quickly understand security risks\n\nWe know that developers are playing [an even greater role in the remediation of security vulnerabilities](https://about.gitlab.com/developer-survey/). However, not every developer is well-versed in cybersecurity or has a working knowledge of the tactics, techniques, and procedures a threat actor will use to exploit an application. 
This creates a knowledge gap, which is exposed when vulnerabilities are uncovered.\n\n[GitLab Duo Vulnerability Explanation](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/) bridges the knowledge gap between security and development teams. It gives developers a detailed description of the vulnerability infecting their code, real-world examples of how attackers can exploit the vulnerable code, and practical suggestions for remediation.\n\nWith this feature, you can level up your security skills, resolve vulnerabilities faster, and help create a proactive security culture — all while lightening the load on your security teams.\nGitLab Duo Vulnerability Explanation is available as a [GitLab Duo Enterprise add-on](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MMVFvGrmMzw?si=Zsx-91078XSNNUSm\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Advanced SAST: Filter out the noise\n\nFalse positives are a [top frustration](https://about.gitlab.com/developer-survey/2024/security-compliance/) for both security and development teams. Unfortunately, this is a common complaint of traditional Static Application Security Testing (SAST). While SAST is great at integrating security early in the software development lifecycle, its value diminishes when it produces inaccurate results. “Drowning in a backlog of vulnerabilities” is a reality for many security and development teams, often resulting in tension between them.\n\n[Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/), our newest security scanner, uses a proprietary detection engine with rules informed by in-house security research to identify exploitable vulnerabilities. 
It delivers more accurate results, so security and development teams don’t have to sort through the noise of false-positive results, shortening triage time, improving development velocity, and decreasing friction between teams.\n\nAdvanced SAST is available in the [GitLab Ultimate tier](https://about.gitlab.com/pricing/ultimate/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDa1MHOcyn8?si=Ff4HjNpvv5eXsSNH\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Put these features to work today\n\nAt GitLab, we’re committed to making it easier for teams to build software, faster. Capabilities like GitLab Duo Root Cause Analysis, GitLab Duo Vulnerability Explanation, and GitLab Advanced SAST are just a few of the recent innovations we’ve delivered to help developers and security teams level up their DevSecOps workflows. To learn more, check out our [releases page](https://about.gitlab.com/releases/categories/releases/).\n\n> Get started with these new features today with [a free trial of GitLab Ultimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F).","product",[693,695,696,697,9],"features","DevSecOps","security",{"slug":699,"featured":91,"template":700},"3-gitlab-features-to-level-up-devsecops-workflows","BlogPost","content:en-us:blog:3-gitlab-features-to-level-up-devsecops-workflows.yml","3 Gitlab Features To Level Up Devsecops 
Workflows","en-us/blog/3-gitlab-features-to-level-up-devsecops-workflows.yml","en-us/blog/3-gitlab-features-to-level-up-devsecops-workflows",{"_path":706,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":707,"content":713,"config":722,"_id":724,"_type":14,"title":725,"_source":16,"_file":726,"_stem":727,"_extension":19},"/en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"title":708,"description":709,"ogTitle":708,"ogDescription":709,"noIndex":6,"ogImage":710,"ogUrl":711,"ogSiteName":685,"ogType":686,"canonicalUrls":711,"schema":712},"5 Teams that made the switch to GitLab CI/CD","See what happened when these five teams moved on from old continuous integration and delivery solutions and switched to GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678657/Blog/Hero%20Images/ci-cd-competitive-campaign-blog-cover.png","https://about.gitlab.com/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Teams that made the switch to GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-25\",\n      }",{"title":708,"description":709,"authors":714,"heroImage":710,"date":716,"body":717,"category":718,"tags":719},[715],"Chrissie Buchanan","2019-04-25","\nNo team is immune to process challenges, and as organizations grow these challenges only get worse. Sometimes there’s a lack of visibility during the development process, sometimes legacy systems create instability and lack functionality, and sometimes things just _stop working_. Continuous integration and delivery [(CI/CD)](/topics/ci-cd/) enables teams to deploy faster, and finding the right tool can make a big difference in the development lifecycle. 
Great companies know how to identify problems and when it’s time to find better solutions.\n\nWe’ve previously shared [why teams love GitLab CI/CD](/blog/why-gitlab-ci-cd/), and now we want to highlight five real-world examples of teams that abandoned dated continuous integration and delivery solutions and made the switch to GitLab CI/CD. We’ll show you how they:\n\n*   Reduced costs.\n*   Deployed faster.\n*   Improved efficiency.\n*   Made engineers’ lives easier.\n\n### Verizon Connect\n\nThe Verizon Connect Telematics Container Cloud Platform team had several challenges: too many tasks, disjointed processes, and outdated, Java-based monolithic applications. Add tools like [BitBucket](/competition/bitbucket/), Jenkins, and Jira in the mix and the Verizon Connect team was struggling with _data center builds that took nearly 30 days_. It was time to start from scratch.\n\nThe team chose GitLab to support this infrastructure initiative and reduced data center deploys from 30 days to _under eight hours_.\n\n[Read on](/blog/verizon-customer-story/)\n{: .alert .alert-gitlab-purple}\n\n### Ticketmaster\n\nFor the Ticketmaster mobile team, a two-hour pipeline for a minor change was the last straw. After years with Jenkins and a system weighed down by plugins and legacy development, they knew they needed to reevaluate their continuous integration and delivery tools.\n\nAfter adopting GitLab CI/CD, Ticketmaster was able to move to weekly releases, decreasing their pipeline execution time from two hours to _only eight minutes_ to build, test, and publish artifacts.\n\nLearn how GitLab CI/CD gave the mobile team their Friday afternoons back.\n\n[Read more](/blog/continuous-integration-ticketmaster/)\n{: .alert .alert-gitlab-purple}\n\n### HumanGeo\n\nAs a software development company, HumanGeo ships a lot of code. 
Development speed is vital, and when Jenkins CI became yet another thing to manage, they needed to make a change.\n\nJustin Shelton, an engineer at HumanGeo, talks about why they decided to switch to GitLab CI/CD, and how they were able to:\n\n*   Cut admin time by 96 percent.\n*   Cut costs by 33 percent.\n*   Increase the pace of development.\n\n[Learn how](/blog/humangeo-switches-jenkins-gitlab-ci/)\n{: .alert .alert-gitlab-purple}\n\n### Wag!\n\nIn three years, Wag! has supported more than one billion walks through its on-demand dog walking, sitting, and boarding mobile app. The engineering team was searching for a simplified solution that would streamline the development process. The company had been using Travis and other continuous integration and delivery systems but wanted something with a better interface that offered more control.\n\nWag!'s infrastructure engineers no longer have to manually stage and test their work. They now use the full GitLab CI/CD pipeline – so whether it's the Android application, the web application, the API, or infrastructure, it's all being tested, built, and deployed through GitLab.\n\n[Check it out](/blog/wag-labs-blog-post/)\n{: .alert .alert-gitlab-purple}\n\n### Paessler AG\n\nPaessler AG’s PRTG Network Monitor is used by enterprises and organizations of all sizes and industries across more than 170 countries. It’s critical that their monitoring service is able to keep up with developments but stability issues meant that sometimes things just stopped working.\n\nThe Paessler team initially chose GitLab for version control, but after seeing the functionality and potential of GitLab pipelines, they decided to replace Jenkins as well. Since adopting GitLab CI/CD, the Paessler AG team now has 4x more releases and 90 percent of QA self-served.\n\n[Read the case study](/customers/paessler/)\n{: .alert .alert-gitlab-purple}\n\nWant to know what GitLab CI/CD could do for your team? 
You’re invited to join us for our CI/CD webcast, _Mastering continuous software development_. Learn how GitLab’s built-in CI/CD helps teams apply continuous software development without all the complicated integrations and plugin maintenance.\n\nIn this webcast, we’ll cover:\n\n* Three main approaches to the continuous software development methodology.\n* The benefits of continuous integration, delivery and deployment practices.\n* A demonstration of GitLab’s CI/CD pipeline to build, test, deploy, and monitor your code.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch GitLab's [Mastering continuous software development](/webcast/mastering-ci-cd/) webcast\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n","engineering",[720,721,9],"customers","DevOps",{"slug":723,"featured":6,"template":700},"5-teams-that-made-the-switch-to-gitlab-ci-cd","content:en-us:blog:5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","5 Teams That Made The Switch To Gitlab Ci Cd","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd.yml","en-us/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd",{"_path":729,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":730,"content":736,"config":743,"_id":745,"_type":14,"title":746,"_source":16,"_file":747,"_stem":748,"_extension":19},"/en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"title":731,"description":732,"ogTitle":731,"ogDescription":732,"noIndex":6,"ogImage":733,"ogUrl":734,"ogSiteName":685,"ogType":686,"canonicalUrls":734,"schema":735},"6 ways SMBs can leverage the power of a DevOps platform","Bringing a DevOps platform into a small business can be a game changer. It can also cut down on the hat wearing. 
Here are the top 6 benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"6 ways SMBs can leverage the power of a DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-04-12\",\n      }",{"title":731,"description":732,"authors":737,"heroImage":733,"date":739,"body":740,"category":741,"tags":742},[738],"Sharon Gaudin","2022-04-12","\nA small or medium-sized business (SMB) or enterprise (SME) is likely working with a small staff but facing a big workload and even bigger expectations. Creating applications that will expand the customer base, keep up with a changing market, and take on competitors with deeper pockets can be daunting.\n\nIt’s possible to ease those burdens by choosing a single, end-to-end DevOps platform. Productivity will skyrocket and so will opportunities to [grow the company](https://page.gitlab.com/resources-ebook-smb-beginners-guide-devops.html).\n\nOf course, DevOps offers significant technical benefits, like testing and building at scale with [continuous integration and continuous delivery](/blog/how-to-keep-up-with-ci-cd-best-practices/), a shorter lead time with automated deployment, and [fewer production failures with earlier error detection](/blog/iteration-on-error-tracking/). But a DevOps platform also offers myriad business benefits to help support and expand a start-up or SMB.\n\nHere are six more ways a DevOps platform can help an SMB:\n\n## Improved customer satisfaction\n\nUsing a DevOps platform means iteration can happen faster. And that’s critical for SMBs that need to be able to quickly make changes to meet customer needs. 
DevOps also provides a way to [better monitor users’ feedback](/blog/cd-unified-monitor-deploy/) and makes it easier to respond with more speed and agility. And it reduces Change Failure Rates, increasing application reliability and stability.\n\nAll of this means SMBs will be more able to give clients what they want and need, all while creating an engaging customer experience. Closer customer ties create trust and keep users loyal to products. \n\n## Better security\n\nA DevOps platform embeds security to help seamlessly achieve a DevSecOps approach, a cornerstone of [incorporating security scanning early in the software development lifecycle](/blog/efficient-devsecops-nine-tips-shift-left/). By integrating testing and security reviews earlier in the process, and by using end-to-end automation, there are more opportunities to quickly and efficiently address any security issues. This reduces the time between designing new, higher-quality features and rolling them out into production. That's the beauty of a platform approach to DevOps – security isn't an afterthought. It’s part of the entire process.\n\nDevOps not only speeds production but creates more secure applications. And, simply put, more secure software makes for a more trusted product offering… and for happier, more satisfied customers.\n \n## True collaboration and innovation\n\nCollaboration is one of the basic tenets of DevOps. By [fostering communication and innovation](/blog/collaboration-communication-best-practices/), DevOps not only encourages developers and IT to work together, it also supports collaboration throughout the entire company. This is one area where SMBs have a huge advantage: With fewer employees, who also might be less set in their ways, collaboration and innovation are inherently more inclusive in a small business. [An SMB or start-up is never too small for DevOps](/blog/can-an-smb-or-start-up-be-too-small-for-a-devops-platform/). 
By inviting discussion and assistance from all team members, DevOps creates a culture built around learning from and relying on others’ expertise; it also brings more ideas to the table. \n\n## Happier employees and better retention\n\nThe greatest resource a company has is its people. This is even more true for small companies where the pain of employee dissatisfaction and departure is felt even more acutely. Managers also don’t want projects waylaid because the people driving them are leaving.\n\nTo stop that from happening, it’s critical the workplace [keeps employees happy](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). \n\nRetaining a tech team isn’t just about perks, like in-office meditation pods, cereal stations, and foosball tables. Companies also need to give developers the processes and tools they need to be efficient, add automation, and make it easier to find and fix security and compliance issues. A single, end-to-end DevOps platform offers a solution for all of those issues. In our [2021 Global DevSecOps Survey](/developer-survey/), more than 13% of respondents said DevOps makes developers happier or makes their team more attractive to potential new employees. \n\n## Improved decision-making\n\nSmall or medium-sized businesses may lack their larger competitors’ resources, but their agility helps them quickly turn a big idea into action that grows the customer base and profits. A DevOps platform has built-in processes and methods to help sustain an SMB’s agile advantage as it grows, so innovative ideas can scale more quickly and smoothly into products, and ultimately new lines of revenue. Automate more and with higher visibility to make fewer and better decisions.\n\n## Wear all the hats\n\nIt might be a cliche, but it’s also true: SMB employees have to wear all the hats. 
Code writing, customer service, trouble-shooting, accounts payable… SMB teams are masters at multitasking, but that’s not always the most productive way to be.\n\nA DevOps platform makes it [easier to reduce context-switching](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) and work cross-functionally because everyone is using the same tool. Built-in automation reduces the number of tasks that need to be done manually and aids in collaboration. \n\nAt the end of the day, a complete DevOps platform isn’t a shiny toy, it’s a critical SMB tool. Adopting a platform can make an SMB even more nimble, efficient, and able to scale. DevOps readies an SMB to take on bigger competitors with deeper pockets. And that will enable the business to become what its founders and executives envision.\n","devsecops",[721,697,9],{"slug":744,"featured":6,"template":700},"6-ways-smbs-can-leverage-the-power-of-a-devops-platform","content:en-us:blog:6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","6 Ways Smbs Can Leverage The Power Of A Devops Platform","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform.yml","en-us/blog/6-ways-smbs-can-leverage-the-power-of-a-devops-platform",{"_path":750,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":751,"content":757,"config":764,"_id":766,"_type":14,"title":767,"_source":16,"_file":768,"_stem":769,"_extension":19},"/en-us/blog/a-beginners-guide-to-continuous-integration",{"title":752,"description":753,"ogTitle":752,"ogDescription":753,"noIndex":6,"ogImage":754,"ogUrl":755,"ogSiteName":685,"ogType":686,"canonicalUrls":755,"schema":756},"A beginner's guide to continuous integration","Here's how to help everyone on your team, like designers and testers, get started with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679800/Blog/Hero%20Images/beginners-guide-to-ci.jpg","https://about.gitlab.com/blog/a-beginners-guide-to-continuous-integration","\n                      
  {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to continuous integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2018-01-22\",\n      }",{"title":752,"description":753,"authors":758,"heroImage":754,"date":760,"body":761,"category":718,"tags":762},[759],"Riccardo Padovani","2018-01-22","\n\nAt [fleetster](https://www.fleetster.net/) we have our own instance of GitLab and we rely a lot on [GitLab CI/CD](/topics/ci-cd/). Also our designers and QA guys use (and love) it, thanks to its advanced features.\n\n\u003C!-- more -->\n\nGitLab CI/CD is a very powerful system of [continuous integration (CI)](/solutions/continuous-integration/), with a lot of different features, and with every new release, new features land. It has very rich [technical documentation](https://docs.gitlab.com/ee/ci/), but it lacks a generic introduction for people who want to use it in an existing setup. A designer or a tester doesn’t need to know how to autoscale it with [Kubernetes](/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab/) or the difference between an image or a service.\n\nBut still, they need to [know what a pipeline is](/topics/ci-cd/cicd-pipeline/), and how to see a branch deployed to an environment. 
In this article therefore I will try to cover as many features as possible, highlighting how the end users can enjoy them; in the last months I explained such features to some members of our team, also developers: not everyone knows what continuous integration is or has used Gitlab CI/CD in a previous job.\n\nIf you want to know why continuous integration is important I suggest reading [this article](/blog/7-reasons-why-you-should-be-using-ci/), while for finding the reasons for using Gitlab CI/CD specifically, I leave the job to [GitLab](/solutions/continuous-integration/) itself.\n\n## Introduction\n\nEvery time developers change some code they save their changes in a commit. They can then push that commit to GitLab, so other developers can review the code.\n\nGitLab will also start some work on that commit, if GitLab CI/CD has been configured. This work is executed by a runner. A runner is basically a server (it can be a lot of different things, also your PC, but we can simplify it as a server) that executes instructions listed in the `.gitlab-ci.yml` file, and reports the result back to GitLab itself, which will show it in his graphical interface.\n\nWhen developers have finished implementing a new feature or a bugfix (activity that usual requires multiple commits), they can open a merge request, where other members of the team can comment on the code and on the implementation.\n\nAs we will see, designers and testers can also (and really should!) join this process, giving feedback and suggesting improvements, especially thanks to two features of GitLab CI: environments and artifacts.\n\n## CI/CD pipelines\n\nEvery commit that is pushed to GitLab generates a pipeline attached to that commit. If multiple commits are pushed together the pipeline will be created for the last one only. 
A pipeline is a collection of jobs split in different stages.\n\nAll the jobs in the same stage run concurrently (if there are enough runners) and the next stage begins only if all the jobs from the previous stage have finished with success.\n\nAs soon as a job fails, the entire pipeline fails. There is an exception for this, as we will see below: if a job is marked as manual, then a failure will not make the pipeline fail.\n\nThe stages are just a logical division between batches of jobs, where it doesn’t make sense to execute the next job if the previous failed. We can have a `build` stage, where all the jobs to build the application are executed, and a `deploy` stage, where the build application is deployed. Doesn’t make much sense to deploy something that failed to build, does it?\n\nEvery job shouldn’t have any dependency with any other job in the same stage, while they can expect results by jobs from a previous stage.\n\nLet’s see how GitLab shows information about stages and stages’ status.\n\n\u003Cimg src=\"/images/blogimages/pipeline-overview.png\" alt=\"Pipeline overview\" style=\"width: 700px;\"/>{: .shadow}\n\n\u003Cimg src=\"/images/blogimages/pipeline-status.png\" alt=\"Pipeline status\" style=\"width: 700px;\"/>{: .shadow}\n\n## What is a CI job?\n\nA job is a collection of instructions that a runner has to execute. You can see in real time what the output of the job is, so developers can understand why a job fails.\n\nA job can be automatic, so it starts automatically when a commit is pushed, or manual. A manual job has to be triggered by someone manually. This can be useful, for example, to automate a deploy, but still to deploy only when someone manually approves it. 
There is a way to limit who can run a job, so only trustworthy people can deploy, to continue the example before.\n\nA job can also build artifacts that users can download, like it creates an APK you can download and test on your device; in this way both designers and testers can download an application and test it without having to ask for help to developers.\n\nOther than creating artifacts, a job can deploy an environment, usually reachable by an URL, where users can test the commit.\n\nJob status are the same as stages status: indeed stages inherit theirs status from the jobs.\n\n\u003Cimg src=\"/images/blogimages/running-job.png\" alt=\"Running job\" style=\"width: 700px;\"/>{: .shadow}\n\n## Artifacts\n\nAs we said, a job can create an artifact that users can download to test. It can be anything, like an application for Windows, an image generated by a PC, or an APK for Android.\n\nSo you are a designer, and the merge request has been assigned to you: you need to validate the implementation of the new design!\n\nBut how to do that?\n\nYou need to open the merge request, and download the artifact, as shown in the figure.\n\nEvery pipeline collects all the artifacts from all the jobs, and every job can have multiple artifacts. When you click on the download button, a dropdown will appear where you can select which artifact you want. 
After the review, you can leave a comment on the MR.\n\nYou can also always download the artifacts from pipelines that do not have a merge request open ;-)\n\nI am focusing on merge requests because usually that is where testers, designers, and shareholders in general enter the workflow.\n\nBut merge requests are not linked to pipelines: while they integrate nicely with one another, they do not have any relation.\n\n\u003Cimg src=\"/images/blogimages/download-artifacts.png\" alt=\"Download artifacts\" style=\"width: 700px;\"/>{: .shadow}\n\n## CI/CD environments\n\nIn a similar way, a job can deploy something to an external server, so you can reach it through the merge request itself.\n\nAs you can see, the environment has a name and a link. Just by clicking the link you to go to a deployed version of your application (of course, if your team has set it up correctly).\n\nYou can also click on the name of the environment, because GitLab also has other cool features for environments, like [monitoring](https://gitlab.com/help/ci/environments.md).\n\n\u003Cimg src=\"/images/blogimages/environment.png\" alt=\"environment\" style=\"width: 700px;\"/>{: .shadow}\n\n## Conclusion\n\nThis was a small introduction to some of the features of GitLab CI: it is very powerful, and using it in the right way allows all the team to use just one tool to go from planning to deploying. A lot of new features are introduced every month, so keep an eye on the [GitLab blog](/blog/).\n\nFor setting it up, or for more advanced features, take a look at the [documentation](https://docs.gitlab.com/ee/ci/).\n\nIn fleetster we use it not only for running tests, but also for having automatic versioning of the software and automatic deploys to testing environments. We have automated other jobs as well (building apps and publishing them on the Play Store and so on).\n\n\n## About the guest author\n\nRiccardo is a university student and a part-time developer at [fleetster](https://www.fleetster.net/). 
When not busy with university or work, he likes to contribute to open source projects.\n\n *[An introduction to continuous integration](https://rpadovani.com/introduction-gitlab-ci) was originally published on rpadovani.com.*\n\n*Cover photo by [Mike Tinnion](https://unsplash.com/photos/3ym6i13Y9LU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n{: .note}\n",[9,763],"user stories",{"slug":765,"featured":6,"template":700},"a-beginners-guide-to-continuous-integration","content:en-us:blog:a-beginners-guide-to-continuous-integration.yml","A Beginners Guide To Continuous Integration","en-us/blog/a-beginners-guide-to-continuous-integration.yml","en-us/blog/a-beginners-guide-to-continuous-integration",{"_path":771,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":772,"content":778,"config":787,"_id":789,"_type":14,"title":790,"_source":16,"_file":791,"_stem":792,"_extension":19},"/en-us/blog/a-ci-component-builders-journey",{"title":773,"description":774,"ogTitle":773,"ogDescription":774,"noIndex":6,"ogImage":775,"ogUrl":776,"ogSiteName":685,"ogType":686,"canonicalUrls":776,"schema":777},"A CI/CD component builder's journey","Learn how a creator of shared, includable templates upskilled by migrating the templates to GitLab CI/CD components and the CI/CD Catalog.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663857/Blog/Hero%20Images/blog-image-template-1800x945__12_.png","https://about.gitlab.com/blog/a-ci-component-builders-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A CI/CD component builder's journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-06-04\",\n      }",{"title":773,"description":774,"authors":779,"heroImage":775,"date":781,"body":782,"category":783,"tags":784},[780],"Darwin Sanoy","2024-06-04","I've always found it 
fascinating that my father, a heavy-duty mechanic by trade, would make his own tools for challenging jobs for which his industry had not yet built a fit-to-purpose tool. Little did I realize I'd become a tool builder in IT, which has been one of my loves for many years now.\n\nI have been building GitLab CI/CD includable, shared templates since starting with GitLab over four years ago. They were designed in a specific way for others to depend directly on them – similar to the dependency managers you see in application languages like Node.js NPM, Python Pypi, and .NET NuGet.\n\nGitLab itself has had long experience in building these shared CI dependencies through [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) and all of our security scanning suite of tools.\n\nWith the introduction of [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), this long-running approach is formalized into a way for everyone to publish GitLab CI/CD components for use by anyone in the world.\n\nSome of they key upgrades compared to the shared templates approach include:\n\n- **Independent component versions** are a new versioning mechanism that no longer relies on inheriting containers versions. GitLab CI/CD component versions bundle together the CI code and any number of containers (or no containers) behind a single CI/CD component version. The concepts of stable, production-grade DevSecOps require the ability to peg dependency versions in automation – for exactly the same reasons and benefits that this is done in production-grade application code.\n\n- **Global visibility (with control)** is available through the catalog at GitLab.com (or global to your company on a self-managed instance). 
Individual component visibility is also subject to the security settings of it's source project - so you can publish components to secure groups.\n\n- **Catalog metadata,** like most code-sharing mechanisms, is needed data to make decisions about which components to use.\n\n## Let's show some code\n\nI much prefer to show than tell, so let's look at a few component examples - all of which also publish their sources publicly (click on the title to access the component).\n\n### 1. [Hello World](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world)\nI noticed that there was not yet a Hello World component that could show the minimum viable component, both the results and the source. This particular example shows how to \"componentize\" just CI code.\n\n### 2. [Hello World Container](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world-container)\nFrequently a CI/CD component will require a container to be fully functional. This example includes a container that is published in the same project as the component itself.\n\n### 3. [GitVersion Ultimate Auto Semversioning](https://gitlab.com/explore/catalog/guided-explorations/ci-components/ultimate-auto-semversioning)\n\nThis component automates the venerable \"GitVersion\" utility, which completely automates selecting the next semversion for your software without having to store the last version – even for busy repositories where many production-possible candidates are being worked on at once. One of the building principles this component follows is the principle of \"least configuration\" or \"default to doing the most useful thing with zero configuration.\" In this case, if your project does not contain a `GitVersion.yml`, the component creates the one that an individual unfamiliar with GitVersion might find to be the most useful starting point.\n\n### 4. 
[Amazon CodeGuru Secure SAST Scanner](https://gitlab.com/explore/catalog/guided-explorations/ci-components/aws/amazon-codeguru-secure-sast)\nThis component is a security scanner and, as such, follows some security scanning best practices I have implemented during recent years. For instance, if it detects that you are licensed for GitLab Ultimate, it has the scanner output GitLab's SAST JSON format, which integrates the findings just like native GitLab scanner findings. The findings appear in MRs and dashboards and can be the target of security policy merge approvals. If, however, you are not licensed for GitLab Ultimate, the scanner outputs JUNIT XML so that you have some basic, non-diffed findings visualization in the pipeline \"Test Results\" tab. It also only activates if there are file types it can scan and disables if the GitLab SAST_DISABLED property is turned on.\n\n### 5. [Checkov IaC SAST](https://gitlab.com/explore/catalog/guided-explorations/ci-components/checkov-iac-sast)\nCheckov IaC SAST is another security scanner component that also follows the above security scanner principles, but specifically for the file types it is capable of scanning. A critical best practice of many of these components is pegging container tags for stability - but doing so through a \"component input\" with a default value. This allows component users to test with and peg to a newer or older version than you last tested with. So your shared dependency then offers stability, but with flexibility.\n\n### 6. [Super-Linter](https://gitlab.com/guided-explorations/ci-components/super-linter)\nSuper-Linter is a community-driven conglomeration of many linters for many languages. It originally started life as a GitHub Action, so this particular example demonstrates some of the ease of porting open source GitHub Actions to GitLab CI/CD components. A best practice aspect to many of my components is to always link to working example code with the component in action. 
This also allows you to do easy testing when performing updates.\n\n### 7. [Kaniko](https://gitlab.com/explore/catalog/guided-explorations/ci-components/kaniko)\nKaniko is a container that can build containers without Docker-in-Docker (DinD) privileged mode requirement. This component supports many OpenContainers labels and multi-arch builds.\n\n### 8. [CI Component Publishing Utilities](https://gitlab.com/explore/catalog/guided-explorations/ci-components/ci-component-pub)\nAs I built more components, I noticed that my \"component publishing CI code\" was being duplicated many times - and that makes it a candidate for becoming a component itself. All the other components here leverage this component. It also uses components itself, so it uses **GitVersion Ultimate Auto Semversioning** to get the next version.\n\nAnd if you're wondering, yes, CI Component Publishing Utilities publishes itself. In many of my components I have expanded the standard \"Inputs\" README section to \"Inputs and Configuration\" and I have added a column to show whether configurations are happening via inputs or variables. While you generally want to favor inputs, there are times when variables give more flexibility or you just want to document that the user can get perform key configurations of the underlying utilities via environment variables that the utility already supports. CI Component Publishing Utilities also uses the **Kaniko** CI component to build a container with the same version if it finds a Dockerfile at the root of your project (or you tell it where one is with a variable). This synchronizes the version of components and containers that support them. It also handles multi-arch container builds - see the documentation linked above to learn more!\n\n## Getting started with component templates\n\nThe Hello World components function as my own personal templates for starting a new component. 
They incorporate the CI Component Publishing Utilities and a reasonably good README.\n\nFor components that contain only CI code, I start by copying the source of [Hello World](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world) and for ones that require a container, I start with [Hello World Container](https://gitlab.com/explore/catalog/guided-explorations/ci-components/hello-world-container). I generally copy just the source into a new project so that I have a clean commit history.\n\nWhen I feel the component is stable and well developed I do a manual pipeline run and force the version to 1.1.0 or greater. The CI Component Publishing Utilities will then auto-increment the version from there.\n\n## CI component Builders Guides and practices\n\n[Darwins CI Component Builders Guide](https://gitlab.com/guided-explorations/ci-components/gitlab-profile) - I was also interested in publishing my approach to building components and what better way to get visibility than as a CI/CD component? BTW, the [GitLab Pipeline Authoring](https://about.gitlab.com/direction/verify/pipeline_composition/) team that created the CI/CD component architecture and [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) has some great best practices published at [CI components best practices](https://docs.gitlab.com/ee/ci/components/#best-practices). The practices I publish reference these ones, but I also have quite a few I follow that are specific to my own lessons learned.\n\n## Finding the CI/CD components and their sources\n\nThe [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) is still undergoing innovation in searchability. 
However, the description from the source project is free-form searchable, so by including standard text in the descriptions of all my component source projects, I have created the ability for users to [find all of the ones I've created in the catalog](https://gitlab.com/explore/catalog?search=Part+of+the+DarwinJS+Builder+Component+Library).\n\nTo make my component source findable regardless of its location on GitLab.com:\n- I add a repository topic to all the projects called [DarwinJS Component Builder Library](https://gitlab.com/explore/projects/topics/DarwinJS+Component+Builder+Libary).\n- I tag with the organic tag I found called [`GitLab CICD Components`](https://gitlab.com/explore/projects/topics/GitLab+CICD+Components).\n\nBoth of the above techniques can help you provide an index to your components and their source if you are inclined to do so.\n\nI hope that my CI/CD component building journey will be helpful to you now and in the future.\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n> \n> - [Introducing CI/CD components and how to use them in GitLab](https://about.gitlab.com/blog/introducing-ci-components/)\n>","open-source",[9,785,786],"CI","CD",{"slug":788,"featured":6,"template":700},"a-ci-component-builders-journey","content:en-us:blog:a-ci-component-builders-journey.yml","A Ci Component Builders 
Journey","en-us/blog/a-ci-component-builders-journey.yml","en-us/blog/a-ci-component-builders-journey",{"_path":794,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":795,"content":801,"config":807,"_id":809,"_type":14,"title":810,"_source":16,"_file":811,"_stem":812,"_extension":19},"/en-us/blog/a-look-ahead-for-gitlab-cicd",{"title":796,"description":797,"ogTitle":796,"ogDescription":797,"noIndex":6,"ogImage":798,"ogUrl":799,"ogSiteName":685,"ogType":686,"canonicalUrls":799,"schema":800},"New up and coming GitLab CI/CD Features","DAG, Multi-project Pipelines, Runner Setup for Kubernetes and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666889/Blog/Hero%20Images/photo-cicd12xlookahead.jpg","https://about.gitlab.com/blog/a-look-ahead-for-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New up and coming GitLab CI/CD Features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-08-07\",\n      }",{"title":796,"description":797,"authors":802,"heroImage":798,"date":804,"body":805,"category":718,"tags":806},[803],"Jason Yavorska","2019-08-07","\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nHey everyone, [Jason Yavorska](https://gitlab.com/jyavorska) here – product manager for CI/CD at GitLab. 
Back in June we\nreached the mid-point of the year and we're heading into our big 12.0 release, so I took the opportunity to\nsummarize some of the [highlights of our 11.x series of releases](/blog/look-back-on-11-11-cicd/).\nHopefully you had a chance to read it, if not, please take a moment to scan through and I bet you'll find an\ninteresting feature or two that can help improve your pipelines.\n\nWe're a couple of releases into the 12.x cycle now and I couldn't wait to share some\nof the things that we're looking forward to delivering the remainder of this year. Some of the features I am most excited about include DAG, a directed acyclic graph that makes it easy to run pipeline steps out of order, expanding our pipelines for merge requests/results feature to also work with forks, as well as making multi-project pipelines a Core feature. With about 3.44M job instances per week/13.76M per month, GitLab CI is growing at a rapid rate to help our customers and users with their deployment needs. Read on below to learn more about all of the exciting CI/CD features in the 12.0 series of releases that will help you to deploy your code quickly.\n\n## What's recent\n\nIn 12.0, we released [visual reviews](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews),\nwhich allows users to provide issue feedback directly from the review apps that\nyour pipelines create. This makes it easy for all your team members to provide accurate\nfeedback on the changes you're making. 
We also added [collapsible job logs](https://docs.gitlab.com/ee/ci/pipelines/index.html#expand-and-collapse-job-log-sections),\nmaking output of pipelines easier to use, and enabled [multiple extends](https://docs.gitlab.com/ee/ci/yaml/#extends)\nfor pipeline jobs to make templatizing behaviors in your configuration even easier.\n\n![Visual Review Apps](https://about.gitlab.com/images/12_0/visual-review-apps.png \"Visual Review Apps\"){: .shadow.medium.center}\n\n[Visual Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html#visual-reviews) were released in GitLab 12.0\n{: .note .text-center}\n\nIn 12.1, we delivered [parallel execution for merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html),\nexpanding on our [pipelines for merged results](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\nto make it very easy to automatically build and test a series of merge requests heading\ninto the same target branch in a fast, safe, and efficient way. For GitLab Pages we also\nadded [automatic HTTPS certificate renewal](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/lets_encrypt_integration.html),\nand completely refactored the GitLab Runner to be able to be [extensible for custom behaviors](http://docs.gitlab.com/runner/executors/custom.html),\nenabling many new kinds of operation modes for your runners including but not limited to\nsupporting any kind of proprietary virtualization environment.\n\n## What's next\n\nNow that you're up to speed with the first couple of 12.x releases, let's look ahead to what's coming next in each monthly release from 12.2 this month to 12.6 in December.\n\n## 12.2 (August 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. 
For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\n12.2 is just around the corner and it's also looking to be a big one.\n\nOne really exciting feature for this release is that we're adding a hybrid directed acyclic graph (DAG) to GitLab CI.\nThis is really just a fancy way of saying you'll be able to run pipeline steps out of order, breaking the\nstage sequencing you're familiar with in GitLab, and allowing jobs to relate to each other directly. This can\nbe valuable for monorepo situations where you have different folders in your repo that can build, test, and maybe\neven deploy independently, or in general it can provide a nice speed boost for your pipeline steps that relate to\neach other (for example, things like artifact processing or sequential test runs.) Read more in our [public issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063)\nabout how this great feature is going to work.\n\n![Directed Acyclic Graph](https://about.gitlab.com/images/blogimages/dag_execution.png \"Directed Acyclic Graph\"){: .shadow.medium.center}\n\nOut of order execution using the [Directed Acyclic Graph](https://gitlab.com/gitlab-org/gitlab-ce/issues/47063)\n{: .note .text-center}\n\nIn addition to the DAG, we're rethinking the way that [rules can be set up for pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085),\nmaking it much easier to understand what a job is going to do compared with trying to figure out how a collection\nof `only/except` rules interact with each other. Another highlight is that we're adding the ability to\n[control behavior for individual users with Feature Flags](https://gitlab.com/gitlab-org/gitlab-ee/issues/11459) along with\n[percentage rollout across all users](https://gitlab.com/gitlab-org/gitlab-ee/issues/8240). 
These will give you a lot of\nflexibility to [progressively control](/direction/ops/#progressive-delivery) how changes are rolled out to your users\neven when the code is already in production.\n\n## 12.3 (September 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nThe individual change in the 12.3 release that I'm most excited about has got to be\n[associating a milestone with a release](https://gitlab.com/gitlab-org/gitlab-ce/issues/62402). One of the greatest\nstrengths of GitLab is the connected ecosystem of features – by tying a release to a milestone, it becomes\npossible to connect all kinds of interesting data in GitLab to the release – issues, merge requests, and more, all\nat your fingertips and curated automatically by GitLab.\n\nWe're also going to be making [runner setup for Kubernetes](https://gitlab.com/gitlab-org/gitlab-ce/issues/63768)\nrequire just a single click to get going, and making a key architectural change to GitLab Pages that will\n[bring initial availability time for pages site down to nearly instantaneous](https://gitlab.com/gitlab-org/gitlab-ce/issues/61929).\n\n## 12.4 (October 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. 
For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFirst up, we're planning on adding a [Hashicorp Vault integration](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) that will let you tie your\nGitLab CI pipelines to your Vault instance, making it possible to keep crucial build and deployment secrets outside\nof GitLab entirely.\n\nWe're also [expanding our pipelines for merge requests/results feature to also work with forks](https://gitlab.com/gitlab-org/gitlab-ee/issues/11934),\nand (building on top of the newly associated milestone) delivering an MVC for fully automated [evidence collection for releases](https://gitlab.com/gitlab-org/gitlab-ce/issues/56030).\nThis means that things like test results, pipeline outputs, merge requests, and issues will have a snapshot\navailable for auditing and review in the context of a release, all collected automatically from throughout GitLab\nwithout having to write a line of code.\n\n## 12.5 (November 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor 12.5, we plan to tackle Helm v3 charts by providing features in our container registry to\nmanage these. [Helm v3](https://helm.sh/blog/helm-3-preview-pt1/) changes a lot about how charts work, and\nwe want to ensure that GitLab is there with you as you start to adopt this very different, but powerful new way\nof working.\n\nWe also plan to revisit [how workspaces are defined and shared](https://gitlab.com/gitlab-org/gitlab-ce/issues/62802),\nmaking it easier to build up a common staging area that can be shared by different jobs/pipelines in an easier-to-use,\nmore natural way than by using the cache or artifacts in GitLab today. 
Last but not least, we're improving on\nour testing parallelization features by making it possible to [leave the parallelization tuning to GitLab itself](https://gitlab.com/gitlab-org/gitlab-ee/issues/12282).\n\n## 12.6 (December 22)\n\n_Since this blog post was published, we have updated our planning based on emerging priorities and customer need. For the latest on what we've got coming next, check out our [CI/CD direction page](/direction/ops/), which is always current._\n\nFor the holidays we're planning on [making multi-project pipelines a Core feature](https://gitlab.com/gitlab-org/gitlab-ce/issues/63497),\nbringing this powerful capability to all of our users. More and more we're hearing that teams are using multi-project\npipelines in all kinds of interesting ways to solve unique problems, and we want to make this feature available to\neveryone who can benefit. EDIT 2020-01-02: We resolved [this issue](https://gitlab.com/gitlab-org/gitlab/issues/31573) back in 12.4 where the trigger keyword was not working in certain cases, which satisfied the request of the folks in that issue to open source the feature. There are potential executive dashboards for cross-project pipelines in the future which will be paid features, but using triggering is in core and working fine. If there are any use cases that are not working for you, please ping me (@jyavorska) in [gitlab#29626](https://gitlab.com/gitlab-org/gitlab/issues/29626) and I'd be happy to take a look.\n\nWe are also bringing in a whole new way of working with GitLab CI/CD: [child/parent pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972).\nUsing these you'll be able to trigger downstream pipelines from your main pipeline; these will run completely independently\nand in their own separate namespace from the main pipeline, but will provide status attribution back to the main pipeline. 
These\nchild pipelines are definable in YAML files anywhere in your repo, so if you have a monorepo (for example) you'll be able to organize\nthese independent pipelines separately but still orchestrate them from a central command and control module.\n\nFinally, we're looking to improve how we show the [change in pipeline duration over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1806)\nas well as how [test runs are changing over time](https://gitlab.com/gitlab-org/gitlab-ee/issues/1020). This trend data will make\nit easier to manage the performance of your pipelines on an ongoing basis.\n\n## In conclusion\n\nHopefully you're as excited about these features as we are. We'd love for you to participate\nin the public issues so we can work together to deliver these features with your input. It's\npossible some specific items may change, but overall\nthis is the direction we're headed as we continue to add iterative improvements across all of CI/CD in\nevery release.\n\nInterested in learning more about GitLab CI/CD in general, and seeing all the rest of\nthe items we plan to deliver? 
Visit our [CI/CD strategy page](/direction/ops/)\nfor our themes, priorities, and more details on what's coming next.\n\nPhoto by [Reginar](https://unsplash.com/photos/4fQAMZNaGUo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,9,695],{"slug":808,"featured":6,"template":700},"a-look-ahead-for-gitlab-cicd","content:en-us:blog:a-look-ahead-for-gitlab-cicd.yml","A Look Ahead For Gitlab Cicd","en-us/blog/a-look-ahead-for-gitlab-cicd.yml","en-us/blog/a-look-ahead-for-gitlab-cicd",{"_path":814,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":815,"content":821,"config":831,"_id":833,"_type":14,"title":834,"_source":16,"_file":835,"_stem":836,"_extension":19},"/en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"title":816,"description":817,"ogTitle":816,"ogDescription":817,"noIndex":6,"ogImage":818,"ogUrl":819,"ogSiteName":685,"ogType":686,"canonicalUrls":819,"schema":820},"How adSoul transitioned to GitLab CI from Jenkins","adSoul, a marketing automation company, outlines a successful three-phase migration plan for moving to GitLab CI from Jenkins.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/adsoul-devops-transition-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How adSoul transitioned to GitLab CI from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-11-05\",\n      }",{"title":816,"description":817,"authors":822,"heroImage":818,"date":824,"body":825,"category":718,"tags":826},[823],"Brein Matturro","2019-11-05","\nadSoul is a Germany-based data-driven online marketing company that aims to improve search engine advertising and scalability for 
businesses. The core of adSoul relies heavily on API interfaces and entity recognition to post keywords on Google and Bing with marketing automation. \n\nAt GitLab Commit London, [Philipp Westphalen](https://www.linkedin.com/in/philipp-westphalen-a83318188/), fullstack developer at adSoul and GitLab Hero, shares how the company transitioned from Jenkins to GitLab CI. adSoul is a startup company with five developers, and as Philipp says “We literally have no time for everything we need to do.” They were looking for a tool that requires less time-consuming maintenance, and with Jenkins the team found it hard to read their existing files. “Our Jenkins was not so stable at all and it was tough to change because it was managed by our provider,” Philipp says. Cost and visibility were also huge motivators in moving away from [Jenkins to GitLab CI](/blog/docker-my-precious/).\n\n## GitLab migration in three phases\n\nPhase 1: Move the repository.\nThe [adSoul team](https://www.adsoul.com) used the GitHub Import by GitLab, but had setbacks with migrating their issues, so they created a GitHub open source issue migrator as a resolution. Following that, they modified scripts with the new origin by exchanging the GitHub API call with a GitLab API. “This was really easy and we had a stable build with our new repository, so we could move our product management to GitLab and not need GitHub anymore,” Philipp says.\n\nPhase 2: Migrate the CI/CD pipeline.\nThe team started to create a GitLab CI YAML and tried to do a simple ‘lift and shift,’ however their processes were more complicated than anticipated. Though this phase was time consuming, it became clear the team could move to phase three without hiccups. “Quick pro tip,” says Philipp. “If you’re running your own GitLab runners, increase the log limit if you have to debug your building step.” \n\nPhase 3: Improve the CI/CD pipeline.\nThe team thought about ways of building their software, so they split projects into steps. 
“Our idea was that one job does one thing perfectly. Each job is simple and everyone can modify it easily” Philipp says. They improved their build time by moving to Gradle, created parallel job processing, and by using standard Docker images for ease of management. \n\n## Takeaways from a successful migration\n\n1. Plan your migration. Get every member of the team involved and aware of the upcoming changes, including how tools are working together and what the expectations are moving forward. “Take your time for the migration,” Philipp says. “It’s not two days and then we are finished.” \n\n2. Go step by step. adSoul used a three phase plan which allowed the team to deploy a new version and still continue to work on existing projects. “We could improve our application without having to wait for a better infrastructure,” Philipp says.\n\n3. Rethink your [DevOps strategy](/blog/better-devops-with-gitlab-ci-cd/). In the time leading up to the migration, examine things like security automation and other important pieces in a DevOps overall strategy.\n\n4. Start with a small project. Work closely with colleagues to create small GitLab CI projects to familiarize everyone before creating larger, overwhelming projects.\n\nPro tip: Keep your pipeline user friendly. Create a good user experience for the team with clear job names, style your config for a better overview, and write comments for variables and hard to understand code. \n\n## Why GitLab works for a small team\n\n“The most important thing is that GitLab is a powerful CI/CD solution with high customization,” Philipp says. There is one home for all projects, without dependencies on one another. With Jenkins, even small exploratory changes can impact the larger job. “With GitLab, you don’t have dependency between branches. 
So, if you’re trying something new for your CI, you can do it simply in your branch and the master branch will not be affected by the changes,” Philipp says.\n\nThe CI is low maintenance, which is a useful timesaver for a smaller team. “The CI provides us with really low maintenance time. So, usually we don’t have to care about our CI for a month or more,” Philipp says.\n\nTo learn more about adSoul’s migration to GitLab, watch Philipp’s talk from GitLab Commit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C5xfw0ydh2k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,721,827,828,829,830],"open source","performance","startups","cloud native",{"slug":832,"featured":6,"template":700},"adsoul-devops-transition-to-gitlab-ci","content:en-us:blog:adsoul-devops-transition-to-gitlab-ci.yml","Adsoul Devops Transition To Gitlab Ci","en-us/blog/adsoul-devops-transition-to-gitlab-ci.yml","en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"_path":838,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":839,"content":843,"config":852,"_id":854,"_type":14,"title":855,"_source":16,"_file":856,"_stem":857,"_extension":19},"/en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations",{"config":840,"title":841,"description":842},{"noIndex":6},"AI in Action Hackathon:  Celebrating the GitLab innovations ","Uncover breakthroughs from this AI development showcase that combined Google Cloud, MongoDB, and GitLab.",{"title":841,"description":842,"authors":844,"heroImage":846,"date":847,"body":848,"category":849,"tags":850},[845],"Nick Veenhof","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664458/Blog/Hero%20Images/Gartner_AI_Code_Assistants_Blog_Post_Cover_Image_1800x945.png","2025-08-05","The AI in Action Hackathon offered a compelling opportunity for developers to explore artificial intelligence. 
Running from May 6 to June 17, 2025, participants developed AI solutions and competed for a $50,000 prize pool. You can find more details about the contest and [explore the projects](https://ai-in-action.devpost.com/project-gallery).\n\nThis hackathon stood out because of a unique collaborative effort, bringing together Google Cloud, MongoDB, and GitLab. The aim was to cultivate an environment for AI development by combining Google Cloud's AI and cloud tools, MongoDB's intelligent data platform for AI, and GitLab's intelligent DevSecOps platform to ship more secure software faster with AI. This partnership allowed developers to integrate these powerful tools, reflecting real-world project dynamics.\n\nThis initiative sought to propel the developer community's growth, and collaboratively shape the future of DevSecOps. GitLab's specific focus in this hackathon was to inspire the creation of AI-enabled applications leveraging both GitLab and Google Cloud. Submissions were encouraged to include contributions to GitLab's product or develop functional components for the [GitLab CI/CD Catalog](https://gitlab.com/explore/catalog). \n\nUltimately, the AI in Action Hackathon became a vibrant stage for developer innovation. It ignited fresh ideas and equipped participants with tangible gains, including new skills, impactful projects for their portfolios, and new professional connections.\n## Meet the winners: AI in action with GitLab\n\nCongratulations to all participants, and specifically to the contest winners. Here's a highlight of the projects that stood out for their deep GitLab integration.\n\n**[Pipeline Doctor: Proactive health for your CI/CD](https://devpost.com/software/pipeline-doctor)**\n*\"As a software engineer, I frequently run into failed GitLab pipelines, often accompanied by cryptic and overwhelming logs. Pinpointing the root cause feels like searching for a needle in a haystack. 
Debugging becomes even more time-consuming when I have to rely on SREs for support.\" - the project's author*\n\nPipeline Doctor addresses this by using AI for advanced root cause analysis, swiftly diagnosing pipeline anomalies. It analyzes logs and changes to pinpoint errors, and could even explain security issues or predict bottlenecks. This means substantial productivity gains for developers, reclaiming time from troubleshooting to focus on new features. It also makes pipelines more reliable, aligning with goals for 80% faster CI builds and 90% less system maintenance. This project signifies a shift from reactive troubleshooting to proactive health monitoring. \n\nA truly impressive step towards more resilient pipelines.\n\n**[Agentic CICD: The future of automated DevSecOps](https://devpost.com/software/agentic-cicd)**\n\n*\"What if AI agents could handle most of the DevOps workload?”- the project’s author*\n\nAgentic CICD is set to profoundly elevate DevSecOps practices by automating code reviews, suggesting intelligent fixes, and optimizing testing and deployment decisions. These agents can evaluate real-time metrics, automate releases, and even initiate rollbacks without immediate human intervention, creating a self-improving feedback loop. This approach also enhances security by proactively identifying risks. The advantages for development teams are tangible: increased productivity, consistently higher software quality, and improved operational efficiency, accelerating development cycles and time-to-market. Agentic CICD cultivates a pipeline capable of *self-healing* and *self-optimization*, amplifying developer capabilities by automating routine tasks and providing intelligent insights. 
\n\nThis project truly showcases the next generation of intelligent automation.\n\n**[Agent Anansi: Your intelligent companion in GitLab](https://devpost.com/software/devgenius)**\n\n*“As someone deeply passionate about DevOps and AI, I was frustrated by the fragmented and reactive nature of traditional CI/CD workflows. While automation is widespread, intelligence is often lacking.“ -  the project's author*\n\nAgent Anansi, a name evoking the clever and resourceful spider from folklore, appears to be a versatile AI agent designed to enhance various GitLab workflows beyond the confines of CI/CD. GitLab's broader vision for AI agents includes systems that mirror familiar team roles and serve as foundational building blocks for highly customized agents. This intelligent companion is poised to enhance GitLab workflows by automating repetitive tasks like issue categorization, optimizing search functions, and performing intelligent data analysis. Similar to GitLab Duo's Chat Agent, Anansi could process natural language requests for information or debugging assistance. A compelling application could be an \"AI mentor\" suggesting personalized learning paths. The overall impact on collaboration and efficiency would be substantial, improving developer experience by minimizing manual tasks and reducing context-switching. It would also enhance collaboration by providing instant access to documentation and enabling direct actions through intelligent interaction. Agent Anansi functions as a personalized productivity co-pilot, moving beyond generic tool assistance to a truly personalized experience that increases individual developer efficiency and reduces cognitive load. \n\nA fantastic example of AI making daily development work smarter and more intuitive.\n\n## The power of partnership: Google Cloud, MongoDB, and GitLab fuel innovation\n\nThe AI in Action Hackathon underscored the potency of strategic partnerships in driving innovation. 
Google Cloud served as a foundational pillar, providing its advanced AI tools, machine learning capabilities, and extensive cloud computing resources as the bedrock for all hackathon projects. MongoDB offered the indispensable intelligent data layer, and GitLab provided the DevSecOps platform essential for building, securing, and deploying these sophisticated AI-enabled applications. Participants were granted access to these powerful tools through free trials or credits, reducing the barriers for experimentation.\n\nThe collaborative synergy among these partners was unmistakable in the multipartner structure of the hackathon. This environment allowed participants to explore a wide array of technologies and integration possibilities, enabling them to create innovative projects that addressed real-world problems. \n\n## Getting to know GitLab's Duo Agent Platform\n\nGitLab is reimagining software development, charting a future where humans and AI collaborate seamlessly. [GitLab Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) allows users to build, customize, and connect AI agents to match their workflow. Developers are empowered to focus on strategic, creative challenges, as AI agents adeptly manage routine tasks such as providing project status updates, bug fixes, and code reviews concurrently.\n\n[Duo Agent Platform is now in public beta](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) for GitLab Premium and Ultimate customers on GitLab.com and self-managed environments.\n\n[AI agents](https://about.gitlab.com/topics/agentic-ai/) on the platform leverage comprehensive context from your GitLab projects, code, and requirements. They can also interoperate with other applications or data sources for expanded context and actionable assistance. 
The platform delivers extensible, customizable agentic AI: Users can create and customize agents and agentic flows that understand their specific work processes and organizational needs. Custom rules can be defined in natural language, ensuring agents perform precisely as configured. A catalog for custom skills, agents, and flows is also planned for future release.\n\nDuo Agent Platform is seamlessly integrated into your workflow, available in your IDE (Integrated Development Environment) or GitLab’s web UI. It currently supports VS Code and the JetBrains family of IDEs, with Visual Studio support planned. This ability to set custom rules for agents, such as specific formatting for code or adherence to language versions, is poised to accelerate reviews and enable swifter deployment of consistent, secure code.\n\nTo get started, GitLab.com customers need to activate GitLab Duo beta features for their group, while self-managed customers need to enable these features for their GitLab Self-Managed instance. For those who are not yet GitLab customers, [a GitLab Ultimate trial](https://about.gitlab.com/free-trial/devsecops/), including Duo Agent Platform, is available at no cost.\n\n## Join the AI revolution: What's next for developers\n\nThe AI in Action Hackathon vividly showcased the transformative potential of artificial intelligence when applied to real-world software development challenges. For developers inspired by these breakthroughs, the journey into AI-powered DevSecOps has just started. Users are encouraged to explore and harness the power of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), which is engineered to substantially elevate productivity, enhance operational efficiency, and reduce security risks across the software development lifecycle. 
GitLab Duo offers a suite of integrated features, including intelligent Code Suggestions, an interactive Chat agent, AI-assisted Root Cause Analysis for CI/CD failures, and clear explanations for security vulnerabilities — all directly accessible within the platform.\n\nBeyond utilizing these powerful tools, developers are invited to contribute actively to the vibrant [GitLab community](https://about.gitlab.com/community/). This hackathon is an integral part of GitLab's broader community engagement initiative, which encourages contributions to [GitLab's open source community](https://about.gitlab.com/community/). By contributing, developers can directly shape the platform that millions use to deliver software faster and more securely. As a testament to GitLab's commitment to its community, contributors benefit from the very AI-powered tools, such as GitLab Duo, that they help build. Furthermore, GitLab recognizes and rewards community contributions through various programs, including the monthly Notable Contributor initiative and special recognition for Hackathon winners.\n\nThe AI in Action Hackathon showcased how a robust trust infrastructure, combined with emerging AI use cases, is forging a path toward a more trustworthy and efficient digital future. GitLab is dedicated to accelerating the monthly delivery of potent new AI features, with a clear strategic trajectory toward becoming a premier agent orchestration platform. GitLab is poised to empower users to craft, tailor, and disseminate complex agent flows, enabling highly automated and intelligent workflows. 
The landscape of software development is rapidly transforming, becoming progressively autonomous, adaptive, and AI-driven.\n\nI can’t wait to see what you will build next with GitLab!","ai-ml",[827,851,9],"AI/ML",{"featured":91,"template":700,"slug":853},"ai-in-action-hackathon-celebrating-the-gitlab-innovations","content:en-us:blog:ai-in-action-hackathon-celebrating-the-gitlab-innovations.yml","Ai In Action Hackathon Celebrating The Gitlab Innovations","en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations.yml","en-us/blog/ai-in-action-hackathon-celebrating-the-gitlab-innovations",{"_path":859,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":860,"content":866,"config":876,"_id":878,"_type":14,"title":879,"_source":16,"_file":880,"_stem":881,"_extension":19},"/en-us/blog/align-engineering-security-appsec-tests-in-ci",{"title":861,"description":862,"ogTitle":861,"ogDescription":862,"noIndex":6,"ogImage":863,"ogUrl":864,"ogSiteName":685,"ogType":686,"canonicalUrls":864,"schema":865},"How Developer-Centric AppSec Testing Transforms DevOps Teams","Find and fix security bugs faster by implementing developer-centric application security testing in the CI pipeline. And the bonus? 
Engineering and security will finally be better aligned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681513/Blog/Hero%20Images/stackhawk.jpg","https://about.gitlab.com/blog/align-engineering-security-appsec-tests-in-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How developer-centric AppSec testing can dramatically change your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joni Klippert\"}],\n        \"datePublished\": \"2020-08-21\",\n      }",{"title":867,"description":862,"authors":868,"heroImage":863,"date":870,"body":871,"category":718,"tags":872},"How developer-centric AppSec testing can dramatically change your DevOps team",[869],"Joni Klippert","2020-08-21","\n\nSoftware development has accelerated dramatically over the past decade. As [DevOps](/topics/devops/) became pervasive, companies went from shipping software monthly to shipping software to production frequently throughout the day. This happened as engineering teams took ownership of the deployment, performance, and resilience of their software. \n\nAnd it has paid off. Companies that have adopted DevOps are deploying software significantly faster, ultimately driving business value as innovation is more rapidly delivered to customers.\n\nSecurity, however, did not keep up. Security teams typically fell into one of two positions - the blocker of frequent deployments or the team perpetually bringing up issues in last month’s work. The need for a shift in the security model is widely known. 
It was the subject of the [2019 Black Hat Conference keynote](https://www.blackhat.com/us-19/briefings/schedule/index.html#every-security-team-is-a-software-team-now-17280), stats from GitLab’s [2020 Global DevSecOps Survey](https://about.gitlab.com/resources/downloads/2020-devsecops-report.pdf) make this obvious, and we’ve [shared our opinions](https://www.stackhawk.com/blog/application-security-is-broken/) at StackHawk.\n\nI believe there is a solution (or at least a *huge* step in the right direction)... developer-centric [application security](/topics/devsecops/) tooling in the CI pipeline.\n\n## The CI pipeline aligns engineering and security\n\nWhile some in the industry have been debating the term DevSecOps, leading companies have started adopting developer-first security tooling that brings alignment through the CI pipeline. Instrumented correctly, it ensures that security bugs are caught before they hit production and that the fix cycle is drastically shortened.\n\nThe legacy model has security teams running application security tests against production environments. These sort of checks are great if they are your backstop. But if this is the primary way of assessing your application’s security posture, you need to catch up with modern engineering practices. \n\nModern teams are running checks on each microservice that makes up the customer facing application, catching bugs in pipeline, and equipping developers with the information to self serve fixes and triage issues. Fix times are significantly shorter, as developers are still in the context of the code they were working on. By testing microservices vs. the end state application, the underlying bugs are much easier to find and fix. And with developer-centric tooling, developers can fix bugs themselves instead of cycling through siloed internal processes. This structure better aligns each function with their best skill sets. 
Engineers know the application the best and are most equipped to fix, and security teams are able to focus on strategy instead of Jira ticket creation.\n\nThe key is to get the instrumentation right (read: don’t break the build for stupid stuff).\n\n## Application security tests in CI\n\nThat sounds great in theory, but what does it look like in practice? Getting started is actually more simple than it seems. We suggest adding three application security tests to start:\n\n## Software composition analysis (SCA)\n\nSCA identifies the open source dependencies in your code base and compares that against a database of known security vulnerabilities. Some tools automatically create pull requests to patch outdated libraries. Open source use is exponentially growing, especially with chained dependencies. SCA is incredibly important, but also can be noisy with non-exploitable findings.\n\nSome of the leading vendors in the space are [GitLab](/) and [Snyk](https://snyk.io/), with up and comers like [FOSSA](https://fossa.com/) also worth paying attention to.\n\n## Dynamic application security testing (DAST)\n\nDAST runs security tests against your running application, from localhost to CI to production. The beauty of DAST is that it most closely resembles what an attacker would see, by attacking your running application and reducing false positives. The two things to be sure of as you start testing with DAST is that your scanner is finding all of your paths and API endpoints and that it is able to scan as an authenticated user.\n\nGitLab provides DAST checks for Ultimate tier customers. If you want more robust scanning options and additional functionality to manage and fix bugs, [StackHawk](https://www.stackhawk.com) is the only place to turn (obviously I’m biased here). 
Other solutions include legacy vendors such as [Rapid7](https://www.rapid7.com/) or open source leader [ZAP](https://www.zaproxy.org/).\n\n## Secrets detection\n\nFinally, you’ll want to ensure that you have detection for leaked secrets in code. This tooling looks for credentials, keys, or other secrets that may have unintentionally been committed to the code base by developers. GitLab includes [secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) in their GitLab Ultimate security tooling.\n\n## Getting started\n\nOftentimes, the thought of adding application security tests to the development workflow feels insurmountable. With a long list of priorities, engineering leadership will sometimes put this off. The reality, however, is that it is not that hard.\n\nAt StackHawk, we see many customers completing their first successful scans within 15 minutes of sign up and instrumentation in CI is literally as easy as adding [a few lines of YAML](https://docs.stackhawk.com/continuous-integration/) to your build.\n\nHere is our recommended playbook of how to get started with AppSec in CI. While this is specific to StackHawk, the principles can be applied to other tools as well.\n\n### Step 1: local testing and config\nAfter signing up and grabbing your API key, start iterating on [configuration](https://docs.stackhawk.com/hawkscan/configuration/) while testing against your application on localhost. This allows you to quickly adjust config and get successful authenticated scans running.\n\n### Step 2: non-blocking CI instrumentation\nAfter you’ve ironed out the configuration locally, add the test to your CI pipeline. At this point, it is strongly recommended to instrument as a non-blocking test so that you can triage any existing findings and smooth out any kinks.\n\n#### Step 3: bug triage - fix critical issues in flight, backlog and discuss the rest\nAfter your first non-blocking CI run, start triaging any initial findings. 
Any bugs marked as High criticality should likely be fixed with some sense of urgency. Lows and Mediums should be triaged depending on your application and the bugs, either quickly addressed or added to a backlog for review. Existing findings should not be the blocker for you instrumenting checks to ensure that new bugs don’t get shipped to production.\n\n#### Step 4: switch to blocking tests\nAfter ironing out config locally and in CI, and then triaging initial findings, it is time to finalize the roll out. Switch the StackHawk test to blocking mode to ensure that new security bugs don’t hit production. You can set the scanner to break on High or Medium and High, which depends on your business and the nature of the application. With this in place, you can be confident that production-ready applications have been scanned for security.\n\n## Cultural shifts: it is more than CI\nThe CI pipeline is the natural hingepoint to start aligning engineering and security. A cultural shift, however, is absolutely needed. (If you're doubtful about this, here's a frank look at why [dev and sec don't get along](/blog/developer-security-divide/).) Modern engineering teams recognize that delivering a secure application is part of quality engineering. Engineers aren’t comfortable shipping applications with UI bugs, and they shouldn’t accept security holes either. \n\nSecurity, on the other hand, needs to shift from the blocker to speedy development and to the enabler of safety in an environment of high speed delivery. 
Modern security engineers are ensuring that their teams are working with safe-by-default frameworks, are equipped with developer-centric tooling, and that there are proper integration tests for business logic that can’t be tested by external tooling.\n\nWhile there is significant catch up needed, it is encouraging to see the leading software teams out there testing application security on every build.\n\n## Dig deeper\n\nTo learn more about adding AppSec tests to your CI build, join me at my [How Security Belongs in DevOps](https://sched.co/dUWD) talk at GitLab Commit on August 26th. You can also always sign up for a [free StackHawk trial or demo](https://www.stackhawk.com) or talk to your GitLab sales representative about the security features in GitLab Ultimate. And for the best of both worlds, check out more details on running [automated security testing with StackHawk in GitLab](https://docs.stackhawk.com/continuous-integration/gitlab.html).\n\n_Joni Klippert is founder & CEO of StackHawk, a software-as-a-service company built to help developers find and fix security vulnerabilities in their code. Joni has been building software for developers for more than 10 years, previously serving as VP Product, VictorOps from seed stage to acquisition by Splunk. Joni is a Colorado native and holds an MBA from the University of Colorado. 
She currently lives in Denver with her fiance Jason and Whippet \"Q\"._\n\nCover image by [Adi Goldstein](https://unsplash.com/@adigold1) on [Unsplash](https://unsplash.com)\n{: .note}\n\n\n\n",[9,873,721,697,874,875],"collaboration","testing","workflow",{"slug":877,"featured":6,"template":700},"align-engineering-security-appsec-tests-in-ci","content:en-us:blog:align-engineering-security-appsec-tests-in-ci.yml","Align Engineering Security Appsec Tests In Ci","en-us/blog/align-engineering-security-appsec-tests-in-ci.yml","en-us/blog/align-engineering-security-appsec-tests-in-ci",{"_path":883,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":884,"content":890,"config":897,"_id":899,"_type":14,"title":900,"_source":16,"_file":901,"_stem":902,"_extension":19},"/en-us/blog/all-aboard-merge-trains",{"title":885,"description":886,"ogTitle":885,"ogDescription":886,"noIndex":6,"ogImage":887,"ogUrl":888,"ogSiteName":685,"ogType":686,"canonicalUrls":888,"schema":889},"How starting merge trains improve efficiency for DevOps","No more queuing and waiting for pipeline results! Read how merge trains will speed up your deployments while making sure master stays green.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678419/Blog/Hero%20Images/merge_trains.jpg","https://about.gitlab.com/blog/all-aboard-merge-trains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How starting merge trains improve efficiency for DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":885,"description":886,"authors":891,"heroImage":887,"date":893,"body":894,"category":718,"tags":895},[892],"Orit Golowinski","2020-01-30","\nA large percentage of a developer's day is spent updating their branches and rebasing, they are essentially \"racing\" their teammates to get their merge requests merged. 
Keeping the master branch green is critical for [continuous delivery](/topics/continuous-delivery/). When the production build breaks, it means your new code isn't going live, which impacts users and revenue. The only way to be 100% sure the master branch stays green when new code merges is to run the pipeline using the latest version of the master branch. For teams that have a high volume of merges, this can be difficult or even impossible. In the time it takes the pipeline to complete one code change, other changes can get merged to master with the potential for conflict. The only way to mitigate this is to queue and sequence the changes so that once a production pipeline starts, other code doesn't get merged ahead of that change. \n\n## What are merge trains and how do they help?\n\n Merge trains introduce a way to order the flow of changes into the target branch (usually master). When you have teams with a high number of changes in the target branch, this can cause a situation where during the time it takes to validate merged code for one change, another change has been merged to master, invalidating the previous merged result.\n\nBy using merge trains, each merge request joins as the last item in that train with each merge request being processed in order. However, instead of queuing and waiting, each item takes the completed state of the previous (pending) [merge ref](https://gitlab.com/gitlab-org/gitlab-foss/issues/47110) (the merge result of the merge), adds its own changes, and starts the pipeline immediately in parallel under the assumption that everything is going to pass.\n\nIf all pipelines in the merge train are completed successfully, then no pipeline time is wasted on queuing or retrying. 
Pipelines invalidated through failures are immediately canceled, the MR causing the failure is removed, and the rest of the MRs in the train are requeued without the need for manual intervention.\n\nAn example of a merge train:\n\n![Diagram of merge trains](https://about.gitlab.com/images/blogimages/merge_trains-1.png){: .shadow}\n\nMR1 and MR2 join a merge train. When MR3 attempts to join, the merge fails and it is removed from the merge train. MR4 restarts at the point that MR3 fails, and attempts to run without the contents of MR3.\nMR3 will remain open in failed state, so that the author can rebase and fix the failure before attempting to merge again.\n\nHere is a demonstration video that explains the advantage of the merge train feature. In this video, we'll simulate the common problem in a workflow without merge trains, and later, we resolve the problem by enabling a merge train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## How the merge trains feature has evolved so far\n\nAfter releasing [merge trains](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains) in GitLab 12.0, we immediately started to use this feature internally, and collected a lot of valuable feedback which helped us to improve and enhance the feature.\n\nWe started by tuning the [merge train concurrency](https://gitlab.com/gitlab-org/gitlab/issues/31692). 
We understood that while merge trains is a feature that is designed to improve efficiency by making sure that master stays green, it can also create an unwanted bottleneck that slows down productivity if your merge requests needs to wait in a long queue in order to get merged.\n\nWe also noticed that many developers were \"skipping the line\" and merging their changes immediately because they did not understand the effect that merging immediately has on other users, so we added a [warning](https://gitlab.com/gitlab-org/gitlab/issues/12679) to clarify this common misunderstanding. We intentionally left the option to still \"merge immediately\" since we also understand the importance of an urgent merge request, such as a \"hot fix\" that must be able to skip to the front of the merge train. Another improvement was the ability to [“squash & merge” as part of the merge train](https://gitlab.com/gitlab-org/gitlab/issues/13001) in order to maintain a clean commit history.\n\nHere is a demonstration video that explains how squash & merge works with merge trains.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/pA5SfHwlq0s\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## What's next\n\nWe plan to add more important features to the support of merge trains. The first is that [merge trains should support fast-forward merge](https://gitlab.com/gitlab-org/gitlab/issues/35628). This could help solve a fundamental contention problem of fast-forward merges: The CI pipeline must be run every time the merge request is rebased, and the merge request must be rebased every time master changes – which is frequently! 
This problem significantly limits the frequency with which merge requests can be merged.\n\nThe second feature, [API support for merge trains](https://gitlab.com/gitlab-org/gitlab/issues/32665), will extend the ability to automate your workflows using merge trains.\n\nWe want to hear from you! Tell us how merge trains have improved your workflow, or give us more insight into how we can improve merge trains to work better for you. [Give us your feedback by commenting here](https://gitlab.com/groups/gitlab-org/-/epics/2408).\n\nCover image by [Vidar Nordli-Mathisen\n](https://images.unsplash.com/photo-1525349769815-0e6ba4e0bbdd?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1611&q=80) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,896,875,695],"demo",{"slug":898,"featured":6,"template":700},"all-aboard-merge-trains","content:en-us:blog:all-aboard-merge-trains.yml","All Aboard Merge Trains","en-us/blog/all-aboard-merge-trains.yml","en-us/blog/all-aboard-merge-trains",{"_path":904,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":905,"content":911,"config":918,"_id":920,"_type":14,"title":921,"_source":16,"_file":922,"_stem":923,"_extension":19},"/en-us/blog/android-cicd-with-gitlab",{"title":906,"description":907,"ogTitle":906,"ogDescription":907,"noIndex":6,"ogImage":908,"ogUrl":909,"ogSiteName":685,"ogType":686,"canonicalUrls":909,"schema":910},"Tutorial: Android CI/CD with GitLab","Learn how to create an automated Android CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669316/Blog/Hero%20Images/angela-compagnone-4Iyg6cNU7sI-unsplash.jpg","https://about.gitlab.com/blog/android-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Android CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-13\",\n  
    }",{"title":906,"description":907,"authors":912,"heroImage":908,"date":914,"body":915,"category":718,"tags":916},[913],"Darby Frey","2023-06-13","\n\nMention the word keystore and all Android developers in a 5km radius will suddenly have a small feeling of panic. Attempting to automate a [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline to deploy an app can be frustrating, and configuring Google Play access and code signing is at the heart of the problem.\n\nBut fear not! GitLab Mobile DevOps is here to make this process easier and faster, and I am here to guide you.\n\n[GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html) is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites \nTo get started, there are a few prerequisites you’ll need:\n\n* A Google Play developer account - [https://play.google.com/console](https://play.google.com/console)\n* Ruby and Android Studio installed on your local machine [https://docs.fastlane.tools/getting-started/android/setup/](https://docs.fastlane.tools/getting-started/android/setup/)\n\n> Try your hand at the [iOS CI/CD for GitLab tutorial](https://about.gitlab.com/blog/ios-cicd-with-gitlab/)\n\n## Reference project\nFor this tutorial, we’ll use the Android demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo).\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called `Gemfile`. 
Give it the following contents:\n\n```ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install.\n```\n\nThis command will install fastlane, and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project. You’ll be asked to enter your package name, so enter that. When prompted for the JSON secret file, you can skip that for now, and you can answer \"no\" to the questions about metadata management.\n\n```\nbundle exec fastlane init\n```\n\n![Initialize fastlane](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/fastlane-init.png)\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files `Appfile` and `Fastfile`.\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile: [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/).\n\n## Code signing\nNext are the steps for code signing.\n\n### Create a keystore\nThe next step is to create a keystore and properties files for code signing. 
Run the following command to generate a keystore in the project root called `release-keystore.jks`:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -storepass password -alias release -keypass password -keyalg RSA -keysize 2048 -validity 10000\n```\n\n![Create a keystore](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/keytool-genkey.png)\n\nMore information is available in the [keytool docs](https://download.java.net/java/early_access/loom/docs/specs/man/keytool.html).\n\nThe next step is to create a properties file to be used by [Gradle](https://gradle.org/_). Create a file in the project root called `release-keystore.properties`, with the following contents:\n\n```\nstoreFile=../release-keystore.jks\nkeyAlias=release\nkeyPassword=password\nstorePassword=password\n```\n\nAlso, be sure to add both files to your `.gitignore` file so they aren't committed to version control.\n\n### Configure Gradle\nNext, configure Gradle to use the newly created keystore. In the `app/build.gradle` file, add the following:\n\n**1.** Right after the plugins section, add:\n\n```\ndef keystoreProperties = new Properties()\ndef keystorePropertiesFile = rootProject.file('release-keystore.properties')\nif (keystorePropertiesFile.exists()) {\n    keystoreProperties.load(new FileInputStream(keystorePropertiesFile))\n}\n```\n\n**2.** Before Build Types, add:\n\n```\nsigningConfigs {\n    release {\n   \t keyAlias keystoreProperties['keyAlias']\n   \t keyPassword keystoreProperties['keyPassword']\n   \t storeFile keystoreProperties['storeFile'] ? file(keystoreProperties['storeFile']) : null\n   \t storePassword keystoreProperties['storePassword']\n    }\n}\n```\n\n**3.** Lastly, add the signingConfig to the release build type:\n\n```\nsigningConfig signingConfigs.release\n```\n\n## Upload keystore to GitLab secure files\nNext, upload your keystore files to GitLab so they can be used in CI/CD jobs. \n\n1. 
On the top bar, select **Menu > Projects** and find your project.\n1. On the left sidebar, select **Settings > CI/CD**.\n1. In the Secure Files section, select **Expand**.\n1. Select **Upload File**.\n1. Find the file to upload, select **Open**, and the file upload begins immediately. The file shows up in the list when the upload is complete.\n\nDo this for both the `release-keystore.jks` file and the `release-keystore.properties` file.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/list-secure-files.png)\n\n## Create a CI/CD pipeline\n\nWith the configuration in place, now copy the contents of the .gitlab-ci.yml and fastlane/Fastfile below to the project.\n\nThis [.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/.gitlab-ci.yml) has all the configuration needed to run the test, build, and beta jobs.\nThe [fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/fastlane/Fastfile) is an example that can be customized to specific project settings.\n\nNote: This fastlane configuration uses plugins. See the [docs](https://docs.fastlane.tools/plugins/using-plugins/) for instructions on how to configure your project for fastlane plugins.\n\n## Create an app in the Google Play Console\nNext, generate a build of your app locally and upload it to seed a new app entry in the Google Play Console. 
Run the following command locally:\n\n```\nbundle exec fastlane build\n```\n\nThis command will create a signed build of the app at\n\n```\nbuild/outputs/bundle/release/app-release.aab\n```\n\nWith the signed build ready to go, log in to the [Google Play Console](https://play.google.com/console) and create a new app and seed it with the initial build.\n\n## Configure Google Play integration\nThe last thing to set up is the Google Play integration in GitLab. To do so, first, create a Google service account.\n\n### Create a Google service account\nFollow the [instructions](https://docs.fastlane.tools/actions/supply/#setup) for setting up a service account in Google Cloud Platform and granting that account access to the project in Google Play.\n\n### Enable Google Play integration\nFollow the [instructions](https://docs.gitlab.com/ee/user/project/integrations/google_play.html) for configuring the Google Play integration by providing a package name and the JSON key file just generated for the service account.\n\nThis is a simplified CI/CD configuration that created three CI/CD jobs to run each of the lanes in fastlane on the GitLab Runners. The test and build jobs will run for all CI/CD pipelines, and the beta job will only be run on CI/CD pipelines on the main branch. The beta job is manually triggered, so you can control when the beta release is pushed to Google Play. \n\nWith these configurations in place, commit all of these changes and push them up to your project. 
The CI/CD pipeline will kick off, and you can see these jobs in action.\n",[721,9,917],"tutorial",{"slug":919,"featured":6,"template":700},"android-cicd-with-gitlab","content:en-us:blog:android-cicd-with-gitlab.yml","Android Cicd With Gitlab","en-us/blog/android-cicd-with-gitlab.yml","en-us/blog/android-cicd-with-gitlab",{"_path":925,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":926,"content":933,"config":940,"_id":942,"_type":14,"title":943,"_source":16,"_file":944,"_stem":945,"_extension":19},"/en-us/blog/android-publishing-with-gitlab-and-fastlane",{"title":927,"description":928,"ogTitle":929,"ogDescription":928,"noIndex":6,"ogImage":930,"ogUrl":931,"ogSiteName":685,"ogType":686,"canonicalUrls":931,"schema":932},"Publishing Android apps to Play Store with GitLab & fastlane","See how GitLab, together with fastlane, can build, sign, and publish apps for Android to the Google Play Store.","HPublishing Android apps to Play Store with GitLab & fastlane","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679918/Blog/Hero%20Images/android-fastlane-pipeline.png","https://about.gitlab.com/blog/android-publishing-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish Android apps to the Google Play Store with GitLab and fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-01-28\",\n      }",{"title":934,"description":928,"authors":935,"heroImage":930,"date":936,"body":937,"category":718,"tags":938},"How to publish Android apps to the Google Play Store with GitLab and fastlane",[803],"2019-01-28","When we heard about [_fastlane_](https://fastlane.tools), an app automation\ntool for delivering iOS and Android builds, we wanted to give it a spin to\nsee if a combination of GitLab and _fastlane_ could help us bring our mobile\nbuild and deployment automation to the next 
level and make mobile\ndevelopment a bit easier. You can see an [actual production\ndeployment](https://gitlab.com/gitlab-org/gitter/gitter-android-app/pipelines/40768761)\nof the [Gitter Android\napp](https://gitlab.com/gitlab-org/gitter/gitter-android-app) that uses what\nwe'll be implementing in this blog post; suffice to say, the results were\nfantastic and we've become big believers that the combination of GitLab and\n_fastlane_ is a truly game-changing way for developers to [enable\nCI/CD](/topics/ci-cd/) (continuous integration and continuous delivery) for\ntheir mobile applications. With GitLab and _fastlane_ we're getting, with\nminimal effort:\n\n\n- Source control, project home, issue tracking, and everything else that\ncomes with GitLab.\n\n- Content and images (metadata) for Google Play Store listing managed in\nsource control.\n\n- Automatic signing, version numbers, and changelog.\n\n- Automatic publishing to `internal` distribution channel in Google Play\nStore.\n\n- Manual promotion through `alpha`, `beta`, and `production` channels.\n\n- Containerized build environment, available in GitLab's container registry.\n\n\nIf you'd like to jump ahead and see the finished product, you can take a\nlook at the already-completed Gitter for Android\n[.gitlab-ci.yml](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/.gitlab-ci.yml),\n[build.gradle](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/app/build.gradle),\n[Dockerfile](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/Dockerfile),\nand [_fastlane_\nconfiguration](https://gitlab.com/gitlab-org/gitter/gitter-android-app/tree/master/fastlane).\n\n\n## Configuring _fastlane_\n\n\nWe'll begin first by setting up _fastlane_ in our project, make a couple key\nchanges to our Gradle configuration, and then wrap everything up in a GitLab\npipeline.\n\n\n_fastlane_ has pretty 
good\n[documentation](https://docs.fastlane.tools/getting-started/android/setup/)\nto get you started, and if you run into platform-specific trouble it's the\nfirst place to check, but to get under way you really just need to complete\na few straightforward steps.\n\n\n### Initializing your project\n\n\nFirst up, you need to get _fastlane_ installed locally and initialize your\nproduct. We're using the Ruby `fastlane` gem so you'll need Ruby on your\nsystem for this to work. You can read about [other install options in the\n_fastlane_\ndocumentation](https://docs.fastlane.tools/getting-started/android/setup/).\n\n\n``` ruby\n\nsource \"https://rubygems.org\"\n\n\ngem \"fastlane\"\n\n```\n\n\nOnce your Gemfile is updated, you can run `bundle update` to update/generate\nyour `Gemfile.lock`. From this point you can run _fastlane_ by typing\n`bundle exec fastlane`. Later, you'll see that in CI/CD we use `bundle\ninstall ...` to ensure the command runs within the context of our project\nenvironment.\n\n\nNow that we have _fastlane_ ready to run, we just need to initialize our\nrepo with our configuration. Run `bundle exec fastlane init` from within\nyour project directory, answer a few questions, and _fastlane_ will create a\nnew `./fastlane` directory containing its configuration.\n\n\n### Setting up _supply_\n\n\n_supply_ is a feature built into _fastlane_ which will help you manage\nscreenshots, descriptions, and other localized metadata/assets for\npublishing to the Google Play Store.\n\n\nPlease refer to these [detailed instructions for collecting the credentials\nnecessary to run\n_supply_](https://docs.fastlane.tools/getting-started/android/setup/#setting-up-supply).\n\n\nOnce you've set this up, simply run `bundle exec fastlane supply init` and\nall your current metadata will be downloaded from your store listing and\nsaved in `fastlane/metadata/android`. 
From this point you're able to manage\nall of your store content as-code; when we publish a new version to the\nstore later, the versions of content checked into your source repo will be\nused to populate the entry.\n\n\n### Appfile\n\n\nThe `./fastlane/Appfile` is pretty straightforward, and contains basic\nconfiguration you chose when you initialized your project. Later we'll see\nhow to inject the `json_key_file` in your CI/CD pipeline at runtime.\n\n\n`./fastlane/Appfile`\n\n``` yaml\n\njson_key_file(\"~/google_play_api_key.json\") # Path to the json secret file -\nFollow https://docs.fastlane.tools/actions/supply/#setup to get one\n\npackage_name(\"im.gitter.gitter\") # e.g. com.krausefx.app\n\n```\n\n\n### Fastfile\n\n\nThe `./fastlane/Fastfile` is more interesting, and contains the first\nchanges you'll see that we made for Gitter vs. the default one created when\nyou run `bundle exec fastlane init`.\n\n\nThe first section contains our definitions for how we want to run builds and\ntests. As you can see, this is pretty straightforward and builds right on\ntop of your already set up Gradle tasks.\n\n\n`./fastlane/Fastfile`\n\n``` yaml\n\ndefault_platform(:android)\n\n\nplatform :android do\n\n  desc \"Builds the debug code\"\n  lane :buildDebug do\n    gradle(task: \"assembleDebug\")\n  end\n\n  desc \"Builds the release code\"\n  lane :buildRelease do\n    gradle(task: \"assembleRelease\")\n  end\n\n  desc \"Runs all the tests\"\n  lane :test do\n    gradle(task: \"test\")\n  end\n\n...\n\n```\n\n\nCreating Gradle tasks that publish/promote builds can be complicated and\nerror prone, but _fastlane_ makes this much easier by giving you pre-built\ncommands (called _fastlane_ actions) that let you perform complex tasks with\njust a few simple actions.\n\n\nIn our example, we've set up a workflow where a new build can be published\nto the internal track and then optionally promoted through alpha, beta, and\nultimately production. 
We initially had a new build for each track but it's\nsafer to have the same/known build go through the whole process.\n\n\n``` yaml\n\n...\n\n  desc \"Submit a new Internal Build to Play Store\"\n  lane :internal do\n    upload_to_play_store(track: 'internal', apk: 'app/build/outputs/apk/release/app-release.apk')\n  end\n\n  desc \"Promote Internal to Alpha\"\n  lane :promote_internal_to_alpha do\n    upload_to_play_store(track: 'internal', track_promote_to: 'alpha')\n  end\n\n  desc \"Promote Alpha to Beta\"\n  lane :promote_alpha_to_beta do\n    upload_to_play_store(track: 'alpha', track_promote_to: 'beta')\n  end\n\n  desc \"Promote Beta to Production\"\n  lane :promote_beta_to_production do\n    upload_to_play_store(track: 'beta', track_promote_to: 'production')\n  end\nend\n\n```\n\n\nAn important note is that we've only scratched the surface of the kinds of\nactions that _fastlane_ can automate. You can [read more about available\nactions here](https://docs.fastlane.tools/actions/), and it's even possible\nto create your own.\n\n\n## Gradle configuration\n\n\nWe also made a couple of key changes to our basic Gradle configuration to\nmake publishing easier. Nothing major here, but it does help us make things\nrun a little more smoothly.\n\n\n### Secret properties\n\n\nThe first changed section gathers the secret variables to be used for\nsigning. 
These are either loaded via configuration file, or gathered from\nenvironment variables in the case of CI.\n\n\n`app/build.gradle`\n\n``` groovy\n\n// Try reading secrets from file\n\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\n\ndef secretProperties = new Properties()\n\n\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n\n// Otherwise read from environment variables, this happens in CI\n\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n    secretProperties.setProperty(\"google_project_id\", \"\\\"${System.getenv('google_project_id') ?: \"null\"}\\\"\")\n    secretProperties.setProperty(\"signing_keystore_password\", \"${System.getenv('signing_keystore_password')}\")\n    secretProperties.setProperty(\"signing_key_password\", \"${System.getenv('signing_key_password')}\")\n    secretProperties.setProperty(\"signing_key_alias\", \"${System.getenv('signing_key_alias')}\")\n}\n\n```\n\n\n### Automatic versioning\n\n\nWe also set up automatic versioning using environment variables\n`VERSION_CODE`, `VERSION_SHA`, which we will set up later in CI/CD (locally\nthey will just be `null` which is fine). 
Because each build's `versionCode`\nthat you submit to the Google Play Store needs to be higher than the last,\nthis makes it simple to deal with.\n\n\n`app/build.gradle`\n\n``` groovy\n\nandroid {\n    defaultConfig {\n        applicationId \"im.gitter.gitter\"\n        minSdkVersion 19\n        targetSdkVersion 26\n        versionCode Integer.valueOf(System.env.VERSION_CODE ?: 0)\n        // Manually bump the semver version part of the string as necessary\n        versionName \"3.2.0-${System.env.VERSION_SHA}\"\n```\n\n\n### Signing configuration\n\n\nFinally, we inject the signing configuration which will automatically be\nused by Gradle to sign the release build. Depending on your configuration,\nyou may already be doing this. We only worry about signing in the release\nbuild that would potentially be published to the Google Play Store.\n\n\n> When using App Signing by Google Play, you will use two keys: the app\nsigning key and the upload key. You keep the upload key and use it to sign\nyour app for upload to the Google Play Store.\n\n>\n\n>\n[*https://developer.android.com/studio/publish/app-signing#google-play-app-signing*](https://developer.android.com/studio/publish/app-signing#google-play-app-signing)\n\n\n> IMPORTANT: Google will not re-sign any of your existing or new APKs that\nare signed with the app signing key. 
This enables you to start testing your\napp bundle in the internal test, alpha, or beta tracks while you continue to\nrelease your existing APK in production without Google Play changing it.\n\n>\n\n>\n*`https://play.google.com/apps/publish/?account=xxx#KeyManagementPlace:p=im.gitter.gitter&appid=xxx`*\n\n\n`app/build.gradle`\n\n``` groovy\n    signingConfigs {\n        release {\n            // You need to specify either an absolute path or include the\n            // keystore file in the same directory as the build.gradle file.\n            storeFile file(\"../android-signing-keystore.jks\")\n            storePassword \"${secretProperties['signing_keystore_password']}\"\n            keyAlias \"${secretProperties['signing_key_alias']}\"\n            keyPassword \"${secretProperties['signing_key_password']}\"\n        }\n    }\n    buildTypes {\n        release {\n            minifyEnabled false\n            testCoverageEnabled false\n            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n            signingConfig signingConfigs.release\n        }\n    }\n}\n\n```\n\n\n## Setting up the Docker build environment\n\n\nWe are building a Docker image to be used as a repeatable, consistent build\nenvironment which will speed things up because it will already have the\ndependencies downloaded and installed. 
We're just fetching a few\nprerequisites, installing the Android SDK, and then grabbing _fastlane_.\n\n\n`Dockerfile`\n\n```dockerfile\n\nFROM openjdk:8-jdk\n\n\n# Just matched `app/build.gradle`\n\nENV ANDROID_COMPILE_SDK \"26\"\n\n# Just matched `app/build.gradle`\n\nENV ANDROID_BUILD_TOOLS \"28.0.3\"\n\n# Version from https://developer.android.com/studio/releases/sdk-tools\n\nENV ANDROID_SDK_TOOLS \"24.4.1\"\n\n\nENV ANDROID_HOME /android-sdk-linux\n\nENV PATH=\"${PATH}:/android-sdk-linux/platform-tools/\"\n\n\n# install OS packages\n\nRUN apt-get --quiet update --yes\n\nRUN apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\nbuild-essential ruby ruby-dev\n\n# We use this for xxd hex->binary\n\nRUN apt-get --quiet install --yes vim-common\n\n# install Android SDK\n\nRUN wget --quiet --output-document=android-sdk.tgz\nhttps://dl.google.com/android/android-sdk_r${ANDROID_SDK_TOOLS}-linux.tgz\n\nRUN tar --extract --gzip --file=android-sdk.tgz\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter android-${ANDROID_COMPILE_SDK}\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter platform-tools\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter build-tools-${ANDROID_BUILD_TOOLS}\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-android-m2repository\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-google-google_play_services\n\nRUN echo y | android-sdk-linux/tools/android --silent update sdk --no-ui\n--all --filter extra-google-m2repository\n\n# install Fastlane\n\nCOPY Gemfile.lock .\n\nCOPY Gemfile .\n\nRUN gem install bundle\n\nRUN bundle install\n\n```\n\n\n## Setting up GitLab\n\n\nWith our build environment ready, let's set up our `.gitlab-ci.yml` to tie\nit all together in a CI/CD pipeline.\n\n\n### Stages\n\n\nThe first thing we do is 
define the stages that we're going to use. We'll\nset up our build environment, do our debug and release builds, run our\ntests, deploy to internal, and then promote through alpha, beta, and\nproduction. You can see that, apart from `environment`, these map to the\nlanes we set up in our `Fastfile`.\n\n\n``` yaml\n\nstages:\n  - environment\n  - build\n  - test\n  - internal\n  - alpha\n  - beta\n  - production\n```\n\n\n### Build environment update\n\n\nNext up we're going to update our build environment, if needed. If you're\nnot familiar with `.gitlab-ci.yml` it may look like there's a lot going on\nhere, but we'll take it one step at a time. The very first thing we do is\nset up an `.updateContainerJob` yaml template which can be used to capture\nshared configuration for other steps that want to use it. In this case, it\nwill be used by the subsequent `updateContainer` and `ensureContainer` jobs.\n\n\n#### `.updateContainerJob` template\n\n\nIn this case, since we're dealing with Docker in Docker (`dind`), we are\nrunning some scripts which log into the local [GitLab container\nregistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html),\nfetch the latest image to be used as a layer cache reference, build a new\nimage, and finally push the new version to the registry.\n\n\n``` yaml\n\n.updateContainerJob:\n  image: docker:stable\n  stage: environment\n  services:\n    - docker:dind\n  script:\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    - docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG || true\n    - docker build --cache-from $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG -t $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG .\n    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n```\n\n\n#### `updateContainer` job\n\n\nThe first job that inherits `.updateContainerJob`, `updateContainer`, only\nruns if the `Dockerfile` was updated and will run through the template steps\ndescribed above.\n\n\n``` yaml\n\nupdateContainer:\n 
 extends: .updateContainerJob\n  only:\n    changes:\n      - Dockerfile\n```\n\n\n#### `ensureContainer` job\n\n\nBecause the first pipeline on a branch can fail, the `only: changes:\nDockerfile` syntax won't trigger for a subsequent pipeline after you fix\nthings. This can leave your branch without a Docker image to use. So the\n`ensureContainer` job will look for an existing image and only build one if\nit doesn't exist. The one downside to this is that both of these jobs will\nrun at the same time if it is a new branch.\n\n\nIdeally, we could just use `$CI_REGISTRY_IMAGE:master` as a fallback when\n`$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG` isn't found but there isn't any\nsyntax for this.\n\n\n``` yaml\n\nensureContainer:\n  extends: .updateContainerJob\n  allow_failure: true\n  before_script:\n    - \"mkdir -p ~/.docker && echo '{\\\"experimental\\\": \\\"enabled\\\"}' > ~/.docker/config.json\"\n    - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY\n    # Skip update container `script` if the container already exists\n    # via https://gitlab.com/gitlab-org/gitlab-ce/issues/26866#note_97609397 -> https://stackoverflow.com/a/52077071/796832\n    - docker manifest inspect $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG > /dev/null && exit || true\n```\n\n\n### Build and test\n\n\nWith our build environment ready, we're ready to build our `debug` and\n`release` targets. Similar to above, we use a template to set up repeated\nsteps within our build jobs, avoiding duplication. 
Within this section, the\nfirst thing we do is set the image to the build environment container image\nwe built in the previous step.\n\n\n#### `.build_job` template\n\n\n``` yaml\n\n.build_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: build\n\n...\n\n```\n\n\nNext up is a step that's specific to Gitter, but if you use shared assets\nbetween a iOS and Android build you might consider doing something similar.\nWhat we're doing here is grabbing the latest mobile artifacts built by the\nweb application pipeline and placing them in the appropriate location.\n\n\n``` yaml\n  before_script:\n    - wget --output-document=artifacts.zip --quiet \"https://gitlab.com/gitlab-org/gitter/webapp/-/jobs/artifacts/master/download?job=mobile-asset-build\"\n    - unzip artifacts.zip\n    - mkdir -p app/src/main/assets/www\n    - mv output/android/www/* app/src/main/assets/www/\n```\n\n\nNext, we use [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) containing a binary\n(hex) dump of our signing keystore file and convert it back to a binary\nfile. This allows us to inject the file into the build at runtime instead of\nchecking it into source control, a potential security concern. To get the\n`signing_jks_file_hex` variable hex value, we use this binary -> hex\ncommand, `xxd -p gitter-android-app.jks`\n\n\n``` yaml\n    # We store this binary file in a variable as hex with this command, `xxd -p gitter-android-app.jks`\n    # Then we convert the hex back to a binary file\n    - echo \"$signing_jks_file_hex\" | xxd -r -p - > android-signing-keystore.jks\n```\n\n\nHere we're setting the version at runtime – these environment variables will\nbe used by the Gradle build as implemented above. 
Because `$CI_PIPELINE_IID`\nincrements on each pipeline, we can guarantee our `versionCode` is always\nhigher than the last and be able to publish to the Google Play Store.\n\n\n``` yaml\n    # We add 100 to get this high enough above current versionCodes that are published\n    - \"export VERSION_CODE=$((100 + $CI_PIPELINE_IID)) && echo $VERSION_CODE\"\n    - \"export VERSION_SHA=`echo ${CI_COMMIT_SHORT_SHA}` && echo $VERSION_SHA\"\n```\n\n\nNext, we automatically generate a changelog to include by copying whatever\nyou have in `CURRENT_VERSION.txt` to the current `\u003CversionCode>.text`. You\ncan update `CURRENT_VERSION.txt` as necessary. I won't dive into the details\nof the merge request (MR) creation script here since it's somewhat specific\nto Gitter, but if you're interested in how something like this might work\ncheck out the [`create-changlog-mr.sh`\nscript](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh).\n\n\n``` yaml\n    # Make the changelog\n    - cp ./fastlane/metadata/android/en-GB/changelogs/CURRENT_VERSION.txt \"./fastlane/metadata/android/en-GB/changelogs/$VERSION_CODE.txt\"\n    # We allow the remote push and MR creation to fail because the other job could create it\n    # and it's not strictly necessary (we just need the file locally for the CI/CD build)\n    - ./ci-scripts/create-changlog-mr.sh || true\n    # Because we allow the MR creation to fail, just make sure we are back in the right repo state\n    - git checkout \"$CI_COMMIT_SHA\"\n```\n\n\nJust a couple of final items: First, whenever a build job is done, we remove\nthe jks file just to be sure it doesn't get saved to artifacts, and second\nwe set up the artifact directory from where the output of the build (`.apk`)\nwill be saved.\n\n\n``` yaml\n  after_script:\n    - rm android-signing-keystore.jks || true\n  artifacts:\n    paths:\n    - app/build/outputs\n```\n\n\n#### `buildDebug` and `buildRelease` jobs\n\n\nMost of the 
complexity here was set up in the template, so as you can see\nour `buildDebug` and `buildRelease` job definitions are very clear. Both\njust call the appropriate _fastlane_ task (which, if you remember, then\ncalls the appropriate Gradle task). The `buildRelease` output is associated\nwith the `production` environment so we can define an extra\nproduction-scoped set of [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) which are different\nfrom our testing variables.\n\n\nSince we set up code signing in the Gradle config (`build.gradle`) earlier,\nwe can be confident here that our `release` builds are appropriately signed\nand ready for publishing.\n\n\n```\n\nbuildDebug:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildDebug\n\nbuildRelease:\n  extends: .build_job\n  script:\n    - bundle exec fastlane buildRelease\n  environment:\n    name: production\n```\n\n\nTesting is really just another instance of the same thing, but instead of\ncalling one of the build lanes we call the test lane. Note that we are using\na `dependency` from the `buildDebug` job to ensure we don't need to rebuild\nanything.\n\n\n``` yaml\n\ntestDebug:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: test\n  dependencies:\n    - buildDebug\n  script:\n    - bundle exec fastlane test\n```\n\n\n### Publish\n\n\nNow that our code is being built, we're ready to publish to the Google Play\nStore. We only *publish* to the `internal` testing track and *promote* this\nsame build to the rest of the tracks.\n\n\nThis is achieved through the _fastlane_ integration, using a pre-built\naction to handle the job. In this case we are using a `dependency` on the\n`buildRelease` job, and creating a local copy of the Google API JSON keyfile\n(again stored in a [project-level\nvariable](https://docs.gitlab.com/ee/ci/variables/) instead of checking it\ninto source control.) 
We have this job (and all subsequent jobs) set to run\nonly on `manual` action so we have full human control/intervention from this\npoint forward. If you prefer to continuously deliver to your `internal`\ntrack you'd simply need to remove the `when: manual` entry and you'd have\nachieved your goal.\n\n\nIf you're like me, this may seem too easy to work. With everything we've\nconfigured in GitLab and _fastlane_ to this point, it's really this simple!\n\n\n``` yaml\n\npublishInternal:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  stage: internal\n  dependencies:\n    - buildRelease\n  when: manual\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n  script:\n    - bundle exec fastlane internal\n```\n\n\n### Promote\n\n\nAs indicated earlier, promotion through alpha, beta, and production are all\n`manual` jobs. If internal testing is good, it can be promoted one step\nforward in sequence all the way through to production using these manual\njobs.\n\n\nIf you're with me to this point, there's really nothing new here and this\nreally highlights the power of GitLab with _fastlane_. 
We have a\n`.promote_job` template job which creates the local Google API JSON key file\nand the promote jobs themselves are basically identical.\n\n\n``` yaml\n\n.promote_job:\n  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n  when: manual\n  dependencies: []\n  only:\n    - master\n  before_script:\n    - echo $google_play_service_account_api_key_json > ~/google_play_api_key.json\n  after_script:\n    - rm ~/google_play_api_key.json\n\npromoteAlpha:\n  extends: .promote_job\n  stage: alpha\n  script:\n    - bundle exec fastlane promote_internal_to_alpha\n\npromoteBeta:\n  extends: .promote_job\n  stage: beta\n  script:\n    - bundle exec fastlane promote_alpha_to_beta\n\npromoteProduction:\n  extends: .promote_job\n  stage: production\n  script:\n    - bundle exec fastlane promote_beta_to_production\n```\n\n\nNote that we're `only` allowing production promotion from the `master`\nbranch, instead of from any branch. This is to ensure that the production\nbuild uses the separate set of `production` environment variables which only\nhappens for the `buildRelease` job. 
We also have these [variables set as\nprotected](https://docs.gitlab.com/ee/ci/variables/#protected-variables) so\nwe can enforce that they are only used on the `master` branch which is\nprotected.\n\n\n### Variables\n\n\nThe last step is to make sure you set up the [project-level\nvariables](https://docs.gitlab.com/ee/ci/variables/) we used throughout the\nconfiguration above:\n\n - `google_play_service_account_api_key_json`: see [https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials](https://docs.fastlane.tools/getting-started/android/setup/#collect-your-google-credentials)\n - `oauth_client_id`\n - `oauth_client_id`, protected, `production` environment\n - `oauth_client_secret`\n - `oauth_client_secret`, protected, `production` environment\n - `oauth_redirect_uri`\n - `oauth_redirect_uri`, protected, `production` environment\n - `signing_jks_file_hex`: `xxd -p gitter-android-app.jks`\n - `signing_key_alias`\n - `signing_key_password`\n - `signing_keystore_password`\n\nIf you are using the same [`create-changlog-mr.sh`\nscript](https://gitlab.com/gitlab-org/gitter/gitter-android-app/blob/master/ci-scripts/create-changlog-mr.sh)\nas us,\n\n - `deploy_key_android_repo`: see [https://docs.gitlab.com/ee/user/project/deploy_tokens/](https://docs.gitlab.com/ee/user/project/deploy_tokens/)\n - `gitlab_api_access_token`: see [https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) (we use a bot user)\n\n![Project variables for Gitter for\nAndroid](https://about.gitlab.com/images/blogimages/android-fastlane-variables.png){:\n.shadow.medium.center}\n\n\n## What's next\n\n\nUsing this configuration we've got Gitter for Android building, signing,\ndeploying to our internal track, and publishing to production as frequently\nas we like. 
Next up will be to do the same for iOS, so watch this space for\nour next post!\n\n\nPhoto by [Patrick Tomasso](https://unsplash.com/@impatrickt) on\n[Unsplash](https://unsplash.com/photos/KGcLJwIYiac)\n\n{: .note}\n",[9,232,939,695],"google",{"slug":941,"featured":6,"template":700},"android-publishing-with-gitlab-and-fastlane","content:en-us:blog:android-publishing-with-gitlab-and-fastlane.yml","Android Publishing With Gitlab And Fastlane","en-us/blog/android-publishing-with-gitlab-and-fastlane.yml","en-us/blog/android-publishing-with-gitlab-and-fastlane",{"_path":947,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":948,"content":954,"config":959,"_id":961,"_type":14,"title":962,"_source":16,"_file":963,"_stem":964,"_extension":19},"/en-us/blog/application-modernization-examples",{"title":949,"description":950,"ogTitle":949,"ogDescription":950,"noIndex":6,"ogImage":951,"ogUrl":952,"ogSiteName":685,"ogType":686,"canonicalUrls":952,"schema":953},"Examples of legacy modernisation projects","Discover how four teams committed to the application modernization process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/application-modernization-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Examples of legacy modernisation projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-03-14\",\n      }",{"title":949,"description":950,"authors":955,"heroImage":951,"date":956,"body":957,"category":718,"tags":958},[715],"2019-03-14","\n\nFine wine and cheese. Whiskey. Paul Rudd. 
There are a lot of things that get better with age – legacy systems are _not_ one of them.\n\n## The true cost of legacy systems\n\nOver time, the true cost of legacy systems is enormous: from additional resources needed to maintain them, to lost productivity, they can hinder investments in long-term growth. In highly regulated industries, they can even be a financial liability.\n[Health Insurance Portability and Accountability Act (HIPAA) violations in 2018 resulted in over $28 million in fines](https://compliancy-group.com/hipaa-fines-directory-year/), many of them due to data breaches.\nAs legacy systems grow older, it's [easy to miss critical security patches (if any are even available)](https://www.globalscape.com/blog/how-high-risk-legacy-systems-are-hurting-your-business), making your system more vulnerable to malicious actors ready to use old Java and SSL exploits to expose your network.\n\nEven if we can all agree that legacy system modernization is important, it still takes work.\n[Analysis paralysis is a real phenomenon in the digital transformation journey](/blog/beyond-application-modernization-trends/).\nRipping off the band-aid and committing to faster deployment feels overwhelming, and there are so many application modernization trends to consider. 
But not taking action puts a ceiling on growth.\n\n## Status quo \u003C Innovation\n\nMany large enterprises feel tied down to current practices because there just aren't enough resources left to innovate once legacy systems are maintained.\nFor example, [the greater part of the IT-related federal budget of the United States ($80 billion) goes to maintaining legacy systems.](https://www.spiria.com/en/blog/method-and-best-practices/cost-legacy-systems/)\nWhen large companies can only devote 20 percent of their budget to software modernization, things move even more slowly.\nObsolete systems create a vicious cycle where enterprises feel they have to choose between innovation or keeping things running.\n\nInstead of focusing on a full rip-and-replace of legacy systems, an application modernization strategy that identifies specific challenges reduces potential disruptions.\nMaking goals and achieving them one step at a time can make a big impact.\n\n## How to modernize applications\n\nThese examples of legacy application modernization show how four teams identified challenges, set manageable goals, and decided to [#JustCommit](https://twitter.com/search?q=just+commit) to development efficiency.\n\n### 1. Leveraging microservices\n\nWith a monolithic architecture, everything is developed, deployed, and scaled together.\nWith microservices, each component is broken out and deployed individually as services and the services communicate with each other via API calls.\n[Leveraging microservices allows teams to deploy faster and achieve scale, all at a lower cost](/topics/microservices/).\nAsk Media Group recently participated in a webcast where they discussed their transition from monoliths to microservices leveraging containers, Kubernetes, and AWS.\n\n[Watch the webcast](/webcast/cloud-native-transformation/)\n{: .alert .alert-gitlab-purple}\n\n### 2. 
Improving automation\n\nEquinix, a leading global data center company with over 180+ colocation facilities across five continents, wanted a solution that would help developers code better and faster, to bring customers new features quickly.\nWhile their old system was fine in the beginning, they needed a more robust solution that could meet their enterprise control and scaling needs. See how Equinix increased the agility of their developers, without sacrificing quality, through automation.\n\n{: .alert .alert-gitlab-purple}\n\n### 3. Simplifying the toolchain\n\nGoldman Sachs, one of the largest financial institutions in the world with over $1.5 trillion in assets, had some challenges in their technology division.\nAs a critical center of the financial provider's business, speed is essential, but a complex toolchain with too many parts was slowing them down.\nIn order to have faster deployment cycles and increase concurrent development, they knew they needed to simplify their toolchain. One cohesive environment helped them improve visibility and efficiency.\n\n[Read the case study](/customers/goldman-sachs/)\n{: .alert .alert-gitlab-purple}\n\n### 4. Reducing lifecycles\n\nChris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, shared his team's journey from feedback loops of 4-6 weeks to _just 30 minutes_ at the DevOps Enterprise Summit London in 2018.\nWho says you need to be stuck with a traditional release cadence?\n\n[Watch the presentation](/blog/chris-hill-devops-enterprise-summit-talk/)\n{: .alert .alert-gitlab-purple}\n\nAre you ready to tackle application modernization? 
[Just commit.](/blog/application-modernization-best-practices/)\n",[9,721,830],{"slug":960,"featured":6,"template":700},"application-modernization-examples","content:en-us:blog:application-modernization-examples.yml","Application Modernization Examples","en-us/blog/application-modernization-examples.yml","en-us/blog/application-modernization-examples",{"_path":966,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":967,"content":973,"config":980,"_id":982,"_type":14,"title":983,"_source":16,"_file":984,"_stem":985,"_extension":19},"/en-us/blog/arctic-engine-fuzz-testing-blog",{"title":968,"description":969,"ogTitle":968,"ogDescription":969,"noIndex":6,"ogImage":970,"ogUrl":971,"ogSiteName":685,"ogType":686,"canonicalUrls":971,"schema":972},"How Arctic Engine uses GitLab's fuzz testing","Using GitLab's fuzz testing, we discovered and fixed various real defects that could crash our software. Now we can detect vulnerabilities before merging the code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681504/Blog/Hero%20Images/arcticengine.png","https://about.gitlab.com/blog/arctic-engine-fuzz-testing-blog","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Arctic Engine uses GitLab's fuzz testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Huldra\"}],\n        \"datePublished\": \"2020-08-19\",\n      }",{"title":968,"description":969,"authors":974,"heroImage":970,"date":976,"body":977,"category":978,"tags":979},[975],"Huldra","2020-08-19","{::options parse_block_html=\"true\" /}\n\n\n\n\n## About Arctic Engine\n\n\n[Arctic Engine](https://gitlab.com/huldra/arctic) is an open-source, free\ngame\n\nengine released under the [MIT\nlicense](https://opensource.org/licenses/MIT).\n\nArctic Engine is implemented in C++ and focuses on simplicity. Being a C++\n\nprogrammer and making games should not be joyless, disillusioning, and\n\ndiscouraging. 
In the '80s and '90s, a programmer could make games alone, and\n\nit was fun. Arctic Engine aims at making game development in C++ fun again.\n\n\n## Testing can be fun\n\n\nTesting the game engine is very important since games are usually no more\n\nrobust and performant than the underlying middleware or game engine. Writing\n\ntests by hand is time-consuming and disillusioning, and it may drain the fun\n\nfrom the development process. So, to my shame, I avoided writing tests in\nevery\n\nway I could. For instance, I used static analyzers to detect bugs. The\nproblem\n\nwith static analyzers was the lack of motivation to fix potential issues.\nYou\n\nmay be unsure whether a bug is really there, and it can sometimes be hard to\n\nfind a way to trigger it.\n\n\nThe other possibility was fuzz testing. I heard about fuzzing but didn't try\nit\n\nearlier because I thought it was hard to integrate with the project. I could\n\nnot be more wrong. It's amazing how little effort it takes to get fuzz\ntesting\n\nup and running with GitLab.\n\n\n## Fuzz testing and what it exposed\n\n\nThanks to [Sam Kerr](https://gitlab.com/stkerr) for proving me wrong about\n\nfuzzing by [actually\nfuzzing](https://gitlab.com/huldra/arctic/-/commit/946382569d88c3af7f4a7ea075c3c3cb18d3b06b)\n\nthe sound loader code. Arctic Engine allows loading a sound from a WAV file\nin\n\nmemory. To fuzz the loader's code, you create a small CPP file with a single\n\nfunction like this:\n\n\n```cpp\n\nextern \"C\" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size) {\n    std::shared_ptr\u003Carctic::SoundInstance> result = arctic::LoadWav(data, size);\n    return 0;\n}\n\n```\n\n\nThen you add ``-fsanitize=fuzzer`` flag to the CMakeLists.txt file and a few\n\nlines to the `.gitlab-ci.yml` file, and the fuzzing begins! You may want to\n\ndrop in a few WAV files to the corpus folder to help the fuzzer and speed up\n\nthe process, but that's optional. 
Ok, it was a little harder than that with\nthe\n\nArctic Engine because it would output a message and quit upon processing\n\nunsupported file formats. Still, handling file loading errors this way was a\n\nbad idea, and I finally had a reason to fix it.\n\n\nThe fuzzer started crashing Arctic Engine: first, it triggered a signed\ninteger\n\noverflow, a division by zero, and a buffer overrun. And then, the wave\nloader\n\ngot out-of-memory while trying to resample a tiny WAV file with a sampling\nrate\n\nof 1 sample per second to 44100 samples per second. Wow.\n\n\nWhat I liked about fuzzing is that fuzzer actually crashes your program and\n\nprovides you the input so you can reproduce the crash. And once you've set\nup\n\nthe test harness, the entire testing process is fully automated, saving you\n\ntime and effort. It's like having a personal QA team, you commit your code,\nand\n\nin a few minutes, you already have it tests-covered.\n\n\nThen I fuzzed the CSV and the TGA file parsers and expected to find some\nbugs\n\nin the CSV and none in the TGA. What can I say? You may not find bugs where\nyou\n\nexpect them to be and find bugs where you thought there were none. The TGA\n\nloader crashed immediately with a buffer overrun. It did not account for\nfiles\n\ncontaining only a valid header but no actual image data after it.\n\n\n## Plans\n\n\nI will add a simple HTTP web server and some multiplayer network interaction\n\ncode to the Arctic Engine. I was putting it off for quite a while now\nbecause I\n\nthought testing would be a pain. Now that I know how easy it is to apply\n\nGitLab's fuzz testing to any data processing code, I'm very optimistic and\n\nsomewhat challenged. 
Like \"Can I make it withstand the fuzzer from the first\ntry?\".\n\nIt makes writing code fun for me once again.\n\n\n## Further reading\n\n\n- [GitLab's coverage-guided fuzz testing\ndocumentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#coverage-guided-fuzz-testing)\n\n- [GitLab's Fuzzing 101\nplaylist](https://www.youtube.com/playlist?list=PL05JrBw4t0KoYzW1CR-g1rMc9Xgmnhjfe)\n\n\n### About the guest author\n\n\nHuldra is a senior videogame programmer by day maintainer of the [Arctic\nEngine](https://gitlab.com/huldra/arctic) by night. She started it because\nshe wanted a game engine that kept simple things simple and made complex\nthings possible.\n","unfiltered",[9,827,697,874,763,874],{"slug":981,"featured":6,"template":700},"arctic-engine-fuzz-testing-blog","content:en-us:blog:arctic-engine-fuzz-testing-blog.yml","Arctic Engine Fuzz Testing Blog","en-us/blog/arctic-engine-fuzz-testing-blog.yml","en-us/blog/arctic-engine-fuzz-testing-blog",{"_path":987,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":988,"content":994,"config":1000,"_id":1002,"_type":14,"title":1003,"_source":16,"_file":1004,"_stem":1005,"_extension":19},"/en-us/blog/athlinks-cuts-runtime-in-half-with-giltab",{"title":989,"description":990,"ogTitle":989,"ogDescription":990,"noIndex":6,"ogImage":991,"ogUrl":992,"ogSiteName":685,"ogType":686,"canonicalUrls":992,"schema":993},"Athlinks cuts runtime in half with GitLab","Athlinks, a time management solution platform, shares how moving from Jenkins to GitLab cut CI runtimes in half.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671909/Blog/Hero%20Images/Athlinks_running.jpg","https://about.gitlab.com/blog/athlinks-cuts-runtime-in-half-with-giltab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Athlinks cuts runtime in half with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein 
Matturro\"}],\n        \"datePublished\": \"2019-12-17\",\n      }",{"title":989,"description":990,"authors":995,"heroImage":991,"date":996,"body":997,"category":783,"tags":998},[823],"2019-12-17","\nIf you’ve ever run a [Spartan race](https://www.spartan.com/en), then you’ve likely used Athlinks, the only suite of time management solutions for a variety of racing events. The [Athlinks](https://www.athlinks.com) platform includes race registration, timing, scoring, results -- everything from the check-in process to the orange wrist bands worn by participants. The solution stores over 300 million race results at any given time.\n\n## Athlinks previous DevOps tools run short\n\nThe Athlinks DevOps team previously had experience with several Agile planning tools, including Jira, Rally, and VersionOne. All of the tools they tried didn’t exactly fit what the team needed. They were looking for a tool that offers transparency and a voice for other parts of the business. “(We wanted) to give transparency and a voice to what engineering is working on so that other departments can have input into what is going on,” says Christopher Annannie, engineering manager, Athlinks.\n\n## Athlinks sprint to GitLab CI from Jenkins\n\nAthlinks started using GitLab CE in 2015, migrating over from GitHub. In January of 2018, the team adopted EE and after doing a GitLab CI proof of concept, they moved to Ultimate and away from [Jenkins](/blog/migrating-from-jenkins/). “We quickly discovered that we really wanted the full suite of tools for the Agile and the product side, so we went to GitLab Ultimate,” explains Aaron Rorvig, DevOps manager.\n\nThe group previously had about 300 jobs in Jenkins and now are at less than 40. “We use a wide variety of languages and technologies -- pretty much every operating system, both Android and iOS. We’re all over the place and we use GitLab CI for all of it,” Aaron says. 
The Athlinks team estimates a 50% savings across the board, both in code and in time spent running jobs.\n\n## A win for Athlinks collaboration and communication\n\nSince all of the issues, the code, and [CI pipelines](/blog/defend-cicd-security/) are inside of GitLab, it provides a single view from start to finish. Each team can view all the issues and the labeling helps everyone understand what stage each project is in, how much work has been done, and what the next steps are. “GitLab is not necessarily all that opinionated about how you do issue tracking,” Christopher says. Everything can be tracked, even when the teams don’t use the same issue tracking, it can all exist in one place.\n\nThe issue templates provide structure for the all departments to understand what they need to fill out. “Engineering will get to it quicker without so much back and forth before a problem is actually solved,” Christopher says.\n\nThe communication among the marketing, DevOps and engineering teams is improving. “We’re getting marketing involved in this so we get better about communicating all the new features we’ve deployed this month, so that timers, race directors, and athletes will actually know about the work we’re doing,” Christopher says.\n\nWant to learn more about Athlink’s transition from Jenkins to GitLab? 
Watch the presentation here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Dy_a79_PsNk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Ben Stern](https://unsplash.com/@benst287) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,873,999],"agile",{"slug":1001,"featured":6,"template":700},"athlinks-cuts-runtime-in-half-with-giltab","content:en-us:blog:athlinks-cuts-runtime-in-half-with-giltab.yml","Athlinks Cuts Runtime In Half With Giltab","en-us/blog/athlinks-cuts-runtime-in-half-with-giltab.yml","en-us/blog/athlinks-cuts-runtime-in-half-with-giltab",{"_path":1007,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1008,"content":1014,"config":1021,"_id":1023,"_type":14,"title":1024,"_source":16,"_file":1025,"_stem":1026,"_extension":19},"/en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform",{"title":1009,"description":1010,"ogTitle":1009,"ogDescription":1010,"noIndex":6,"ogImage":1011,"ogUrl":1012,"ogSiteName":685,"ogType":686,"canonicalUrls":1012,"schema":1013},"Atlassian Server ends: Time to move to integrated DevSecOps","Atlassian is about to end support for Server products. 
Learn why now is the time to make the upgrade to GitLab’s single DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658924/Blog/Hero%20Images/securitylifecycle-light.png","https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dave Steer\"}],\n        \"datePublished\": \"2023-09-26\",\n      }",{"title":1015,"description":1010,"authors":1016,"heroImage":1011,"date":1018,"body":1019,"category":741,"tags":1020},"Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform",[1017],"Dave Steer","2023-09-26","\nThe February 15, 2024, end-of-life date for Atlassian Server is fast approaching. If your software development workflows rely on on-premises deployments of Atlassian Server products such as Bitbucket Server for source code management, Bamboo Server for CI/CD, or Jira Server for Agile Planning, you’re faced with a choice. You can settle for the Atlassian options that remain available to you, or you can take a more forward-looking path: Make the move to a single AI-powered DevSecOps platform.\n\n## Atlassian Server end of life: Migration made easy\nMigrating to a DevSecOps platform is simple: With GitLab’s various importer tools you can quickly import repositories, Jira issues, and Bamboo jobs. 
Here are a few resources you can use to get started, whether you’re ready to adopt the whole GitLab platform or you’d like to move one service at a time:\n* [Import your project from Bitbucket Cloud to GitLab](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n* [Import your project from Bitbucket Server to GitLab](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n* [Import your Jira project issues to GitLab](https://docs.gitlab.com/ee/user/project/import/jira.html)\n* [Integrate Jira with GitLab](https://docs.gitlab.com/ee/integration/jira/)\n\nAtlassian discontinuing Server presents the perfect opportunity to consolidate your toolchain, increase developer efficiency, and implement DevSecOps. Let’s take a look at the benefits you can expect from making this move.\n\n## Less complexity, more productivity\nDevSecOps enables companies to build software faster, more efficiently, and more securely. At the same time, the proliferation of DevOps tools is creating additional complexity in how companies develop, secure, and deploy software, which in turn is costing companies time and money. GitLab’s [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) found that 84% of organizations are using between two and ten DevOps tools, with 69% of developers spending at least a quarter of their time maintaining and integrating toolchains. Even if some of those various DevOps tools are from the same vendor, they often require work in the background to set up, secure, and maintain the integrations. That’s a significant amount of effort that teams could be using to deliver value to customers.\n\nLet’s say you’re an Atlassian shop. You’re using Bitbucket Server for source code management, Bamboo Server for CI/CD, Zephyr for test case management, Jira Server for agile planning, and numerous other third-party tools for security scanning, vulnerability tracking, and more. 
As the Atlassian Server end of life approaches, you know that continuing to use Server without security updates and vulnerability fixes puts your company and customers at risk — a major no-go for security and compliance reasons. You have the option to move to Atlassian Cloud or, if you need to remain on-premises, Atlassian Data Center. But transitioning requires time, effort, and significant planning. If you’re already dedicating resources to making this change, why not take the opportunity to simplify matters by consolidating all of those tools into a single [AI-powered DevSecOps platform](https://about.gitlab.com/blog/categories/ai-ml/) instead?\n\nAccording to our 2023 survey, the top benefits of migrating to a DevSecOps platform include cost and time savings, increased efficiency, and better security. Plus, 90% of developers whose organizations are using a platform said they feel they’re able to effectively identify and mitigate security vulnerabilities, among other benefits.\n\n> How much is your toolchain costing you? [See how much you can save with the GitLab DevSecOps Platform](https://about.gitlab.com/calculator/roi/).\n\n## Improved developer experience\nTime spent maintaining and integrating toolchains isn’t just money down the drain for the organization — it’s also a drain on developer satisfaction. [Helping organizations better support their developers](https://about.gitlab.com/blog/the-gitlab-quarterly-how-our-latest-beta-releases-support-developers/) has always been a priority for GitLab, and we’re confident in our position that a single DevSecOps platform is the best way to do just that.\n\nWith a single platform, developers stay in flow and do what they do best: develop great software. 
More concretely, a superior developer experience empowers teams to:\n* focus on work that matters, with less context switching between different tools\n* onboard and get up to speed more quickly with only a single platform to learn\n* break down silos across product, development, security, and operations to foster better collaboration\n* receive continuous feedback and iterate more quickly to produce higher-quality output\n* automate manual tasks with AI built into the development lifecycle to avoid errors and wasted time\n\nIf your team hasn’t been able to invest in the developer experience, now is the perfect time to make it a priority. We’ve heard from GitLab customers like [Airbus](https://about.gitlab.com/customers/airbus/) and [Iron Mountain](https://about.gitlab.com/customers/iron-mountain/) that a toolchain consisting of Jira, Bitbucket, and Bamboo doesn’t offer a user-friendly experience and lacks key capabilities. Why migrate to a new Atlassian Cloud or Data Center setup if you’re going to be missing out on mission-critical features such as [built-in security scanning](https://docs.gitlab.com/ee/user/application_security/), [review apps](https://docs.gitlab.com/ee/ci/review_apps/), and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html)?\n\n## Visibility at every stage\nA unified DevSecOps platform offers [out-of-the-box dashboards and reports](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/) that provide insights on productivity, security, code quality, and more to help teams identify and fix barriers within the software development lifecycle. 
In our 2023 survey, respondents whose organizations are using a DevSecOps platform were significantly more likely to say they clearly understand what is happening across all stages of the software development lifecycle.\n\nHaving that extra visibility:\n* improves software delivery quality and speed by uncovering bottlenecks in software delivery\n* boosts organizational value delivery by helping organizations identify high-performing teams, maintain standards, and share best practices\n* helps organizations ensure the security of their end-to-end software supply chain and compliance with regulatory mandates\n\nYou can’t get that visibility without a single DevSecOps platform — and GitLab is the most comprehensive AI-powered DevSecOps platform on the market. With other providers, you’re still stringing together various third-party tools into complex toolchains, hampering visibility while creating integration headaches and increasing your total cost of ownership. From idea to value, GitLab lets teams collaborate in a single application to [shorten cycle times](https://about.gitlab.com/customers/hackerone/), [reduce development costs](https://about.gitlab.com/customers/carfax/), and [increase developer productivity](https://about.gitlab.com/customers/deutsche-telekom/).\n\n> Learn why GitLab was named a [Leader in the 2023 Gartner® Magic Quadrant™ for DevOps Platforms](https://about.gitlab.com/gartner-magic-quadrant/).\n\nBottom line: [With Atlassian Server support ending](https://www.atlassian.com/migration/assess/journey-to-cloud), you shouldn’t be forced to choose an option that isn’t right for the [future of your business](https://about.gitlab.com/blog/devsecops-platforms-help-smbs-scale-as-they-grow/). 
Whether you’re on the lookout for alternatives to fundamental tools in your stack that will no longer be supported, or you’re simply seeking new ways to boost the productivity of your growing team, adopting an AI-powered DevSecOps platform should be as frictionless as possible. That’s why GitLab offers a variety of deployment options to meet your unique needs, including self-managed, multi-tenant SaaS, and [GitLab Dedicated, our single-tenant SaaS offering](https://about.gitlab.com/blog/introducing-gitlab-dedicated/).\n\nWhen you’re ready to make the move, [we’re here to help](https://about.gitlab.com/sales/).\n",[696,9,697],{"slug":1022,"featured":6,"template":700},"atlassian-server-ending-move-to-a-single-devsecops-platform","content:en-us:blog:atlassian-server-ending-move-to-a-single-devsecops-platform.yml","Atlassian Server Ending Move To A Single Devsecops Platform","en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform.yml","en-us/blog/atlassian-server-ending-move-to-a-single-devsecops-platform",{"_path":1028,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1029,"content":1035,"config":1043,"_id":1045,"_type":14,"title":1046,"_source":16,"_file":1047,"_stem":1048,"_extension":19},"/en-us/blog/auto-devops-explained",{"title":1030,"description":1031,"ogTitle":1030,"ogDescription":1031,"noIndex":6,"ogImage":1032,"ogUrl":1033,"ogSiteName":685,"ogType":686,"canonicalUrls":1033,"schema":1034},"Auto DevOps 101: How we’re making CI/CD easier","VP of product strategy Mark Pundsack shares everything you need to know about Auto DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666915/Blog/Hero%20Images/autodevops.jpg","https://about.gitlab.com/blog/auto-devops-explained","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Auto DevOps 101: How we’re making CI/CD easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie 
Silverthorne\"}],\n        \"datePublished\": \"2019-10-07\",\n      }",{"title":1030,"description":1031,"authors":1036,"heroImage":1032,"date":1038,"body":1039,"category":1040,"tags":1041},[1037],"Valerie Silverthorne","2019-10-07","\nContinuous integration and continuous delivery (CI/CD) are the gold standards of software development but they can be challenging to achieve. GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) feature is designed to make the CI/CD process much easier with baked-in best practices and automation that will move code seamlessly through the entire development lifecycle. [Mark Pundsack](/company/team/#markpundsack), VP of product strategy, demonstrated how straightforward – and customizable – Auto DevOps is during our company-wide meeting, [Contribute 2019](/blog/how-we-scaled-our-summits/). Here’s what you need to know to get started with [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## It’s a shift... left\n\n“Auto DevOps is a [CI/CD pipeline](/topics/ci-cd/) that we have defined for you,” Mark says. “It’s basically all these best practices that we want to encourage everybody to have, and we believe are a good baseline for software development.” The goal is to have everyone set up to do CI/CD, but not just the bare minimum CI/CD, he says. “Like most people when they create a project, they start with running tests. That's the natural thing for CI. And then maybe they'll even get into CD, but they're not going to do things like [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) analysis and security analysis. And we really believe in the [shift left movement](/blog/secure-containers-devops/). If you look at everything as a pipeline, we want to take security and things like that which are stuck at the end and we want to move them as far left as possible. We believe you should be checking for security even on your first deploy. 
So we said, okay, let's put all that in there and make a script that says this is everything that you should be doing, so let's just do it for you.”\n\nThe roots of Auto DevOps can be found in previous versions of GitLab which offered Auto Deploy. “We evolved [Auto DevOps] as the company evolved to have more and more capabilities around the DevOps lifecycle,” Mark explains. Today, Auto DevOps tackles 12 software development steps automatically. Customers wanting more flexibility can choose the [Composable Auto DevOps](/releases/2019/04/22/gitlab-11-10-released/#composable-auto-devops) option, where the template can easily be modified to suit the requirements.\n\n## The Auto DevOps process\n\nAuto DevOps begins with language detection using [Heroku buildpacks](https://devcenter.heroku.com/articles/buildpacks). While not all languages are supported, a build is created and tested automatically for the supported languages, Mark explains. Auto DevOps uses the open source version of [Code Climate](https://codeclimate.com/oss) to do code quality analysis and the results are displayed in the merge request when a change is made. After that, it’s time for security testing; including dependency scanning, license management, and container scanning. “All those things kick off again right from your first deploy,” Mark says. “We’re really taking shifting left seriously there.”\n\nAt this point developers are able to auto review applications. And once that review app is available Auto DevOps will kick off [dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/). “It tries to detect security vulnerabilities in your running application,” Mark says. Finally Auto DevOps will auto deploy to either staging or production depending on how its configured. 
“From the first push it just automatically does all this stuff all the way – from deployment to production – which is pretty great.”\n\nAn app in production will get automatic browser performance testing which both challenges the application and records the results. [Auto monitoring](https://docs.gitlab.com/ee/topics/autodevops/#auto-monitoring) is also running so users can easily track response times, error rates, and even things like CPU and memory utilization. “All of this happens without any configuration whatsoever and that's really, that's why we put ‘auto’ in front of all of these,” Mark says. “It's really almost all the capabilities of our [DevOps lifecycle](/stages-devops-lifecycle/) thrown in by default.”\n\nWatch Mark demonstrate exactly how Auto DevOps works in the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pPRF1HEtQ3s\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Joshua Sortino](https://unsplash.com/@sortino) on [Unsplash](https://unsplash.com)\n{: .note}\n","insights",[9,721,874,1042],"production",{"slug":1044,"featured":6,"template":700},"auto-devops-explained","content:en-us:blog:auto-devops-explained.yml","Auto Devops Explained","en-us/blog/auto-devops-explained.yml","en-us/blog/auto-devops-explained",{"_path":1050,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1051,"content":1057,"config":1065,"_id":1067,"_type":14,"title":1068,"_source":16,"_file":1069,"_stem":1070,"_extension":19},"/en-us/blog/auto-devops",{"title":1052,"description":1053,"ogTitle":1052,"ogDescription":1053,"noIndex":6,"ogImage":1054,"ogUrl":1055,"ogSiteName":685,"ogType":686,"canonicalUrls":1055,"schema":1056},"What's coming for Auto DevOps","We're working on a number of improvements to GitLab Auto DevOps – here's where it's at and where it's 
headed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667050/Blog/Hero%20Images/auto-devops-pipeline-stages.png","https://about.gitlab.com/blog/auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What's coming for Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-30\",\n      }",{"title":1052,"description":1053,"authors":1058,"heroImage":1054,"date":1060,"body":1061,"category":1062,"tags":1063},[1059],"Chris Ward","2020-04-30","[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is designed to make CI/CD adoption easier, with baked-in best practices and automation to take care of moving your code seamlessly through the software development lifecycle. If you or your team are new to DevOps, this is a great place to start. We're excited to share some new and [upcoming improvements to Auto DevOps](#coming-soon), but first: \n\nThere is a prerequisite for Auto DevOps, and that's a Kubernetes cluster. This may or may not be an easy step for you to complete, but your team likely has a cluster set up already. If not, [read our getting started guide](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html).\n\nAuto DevOps should be enabled by default, but if it isn't, go to _Settings > CI/CD > Auto DevOps_ and check _Default to Auto DevOps pipeline_. There are a lot of automated stages available, depending on what version and tier of GitLab you use, and which components you add to your Kubernetes cluster.\n\n1.  **Auto Build**: Builds your code using a _Dockerfile_ if your project has one, or a [Heroku buildpack](https://elements.heroku.com/buildpacks) selected based on the programming language you use, but you can manually set it.\n2.  **Auto Test**: Runs any tests included in your codebase, again using a Heroku buildpack.\n3.  
**Auto Code Quality**: Runs static analysis and other checks over your code using the [code quality image](https://gitlab.com/gitlab-org/ci-cd/codequality).\n4.  **Auto SAST (Static Application Security Testing)**: Runs static analysis checks focussed on security issues using the [SAST image](https://gitlab.com/gitlab-org/security-products/sast).\n5.  **Auto Dependency Scanning**: Checks for potential security issues on project dependencies using the [dependency scanning image](https://gitlab.com/gitlab-org/security-products/dependency-scanning). \n6.  **Auto License Compliance**: Searches project dependencies for what licenses they use, using the [license compliance image](https://gitlab.com/gitlab-org/security-products/license-management).\n7.  **Auto Container Scanning**: Uses [Clair](https://github.com/quay/clair) to run static analysis and security issue checks on any Docker images used. \n8.  **Auto Review Apps**: Creates a version of an application in a temporary environment for team members to try and review.\n9.  **Auto DAST (Dynamic Application Security Testing)**: Runs further security checks using the [OWASP ZAProxy](https://github.com/zaproxy/zaproxy) tool.\n10. **Auto Deploy**: Deploys an application to a production environment as defined in the Kubernetes environment settings.\n11. **Auto Browser Performance Testing**: Tests the performance of application web pages using the [Sitespeed.io image](https://hub.docker.com/r/sitespeedio/sitespeed.io/).\n12. **Auto Monitoring**: Uses Prometheus to monitor system metrics for a deployed application.\n\n### Recent improvement: Readiness for Kubernetes 1.16 ([#32720](https://gitlab.com/gitlab-org/gitlab/issues/32720))\n\nWe recently reworked Auto DevOps features to [match changes in the Kubernetes 1.16 API](/releases/2020/03/22/gitlab-12-9-released/#auto-devops'-default-postgresql-due-to-change). 
Nothing you use will change, but behind the scenes, access different API endpoints, and in different ways.\n\n## Coming soon\n\nSeveral improvements are coming to Auto DevOps in our next few releases to ensure that we help your projects conform to the latest DevOps best practices, and integrate with as many of our platform features and external tools as possible.\n\n### Cloud-native buildpacks for Auto DevOps ([#25954](https://gitlab.com/gitlab-org/gitlab/issues/25954))\n\nSince Heroku created the buildback concept in 2011 when using virtual machines was typical, others have adopted the concept, and created their own that suited containers better. This change in need resulted in the Cloud Native Computing Foundation (CNCF) accepting the [Cloud Native Buildpacks project](https://buildpacks.io/) in 2018 to maintain a standard for buildpacks that suits their modern use cases. Also, in 12.10 we've added support to Cloud Native Buildpacks, and will be switching our \"traditional\" Heroku buildpacks to these newer ones in the coming months.\n\n### Running Auto DevOps on air-gapped networks ([#25642](https://gitlab.com/gitlab-org/gitlab/issues/25642))\n\nWhile many of our users have their clusters connected to the internet, we know not all do, and want to offer these customers as many features as possible. As part of GitLab 13.0, we are researching how to give you the ability to configure the locations of dependencies for Auto DevOps stages.\n\n### Upgrade to Helm 3 ([#29038](https://gitlab.com/gitlab-org/gitlab/issues/29038))\n\nWe use [Helm](https://helm.sh/) to deploy packages needed for various stages of the Auto DevOps process. In 13.1 we will upgrade Helm to version 3, which brought a series of significant changes, including removing Tiller as the \"server\" side of Helm.\n\n### NGINX alerts to auto-monitoring in Auto DevOps ([#118788](https://gitlab.com/gitlab-org/gitlab/issues/118788))\n\nNginx is a popular HTTP and reverse proxy server. 
In 13.0 we will add support for the metrics it exposes to Prometheus for providing alerts to our auto-monitoring feature.\n\n### Add Merge Train support to Auto DevOps ([#121933](https://gitlab.com/gitlab-org/gitlab/issues/121933))\n\n[Merge Trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) are a GitLab feature that let you queue lists of merge requests waiting for merging into a target branch. Auto DevOps doesn't currently support merge trains, but in version 13.1, we will start adding support and helping users get the configuration they need to add their Merge Trains to Auto DevOps workflows.\n\nYou can [read more about merge trains here](/blog/all-aboard-merge-trains/).\n\n## Looking further ahead\n\nThese planned features aside, one other area we are looking to improve is adopting more of a Directed Acyclic Graph (DAG) approach to Auto DevOps pipelines. You will no longer have to wait for one stage to complete before another begins, and you can focus on the results of the stages important to you. 
Feel free to view and comment on [the open issue](https://gitlab.com/gitlab-org/gitlab/issues/33200).\n\nWe are broadly working to make Auto DevOps work seamlessly with as many other GitLab features as possible, and hope you enjoy the time and insights it gives you.\n\nYou can [read more about Auto DevOps here](/blog/auto-devops-explained/).\n","news",[9,695,1064],"inside GitLab",{"slug":1066,"featured":6,"template":700},"auto-devops","content:en-us:blog:auto-devops.yml","Auto Devops","en-us/blog/auto-devops.yml","en-us/blog/auto-devops",{"_path":1072,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1073,"content":1079,"config":1085,"_id":1087,"_type":14,"title":1088,"_source":16,"_file":1089,"_stem":1090,"_extension":19},"/en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem",{"title":1074,"description":1075,"ogTitle":1074,"ogDescription":1075,"noIndex":6,"ogImage":1076,"ogUrl":1077,"ogSiteName":685,"ogType":686,"canonicalUrls":1077,"schema":1078},"Automating Agile workflows with the gitlab-triage gem","Learn how to automate repetitive tasks like triaging issues and merge requests to free up valuable developer time in our \"Getting Started with GitLab\" series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659525/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25.png","https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating Agile workflows with the gitlab-triage gem\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-03-13\",\n      }",{"title":1074,"description":1075,"authors":1080,"heroImage":1076,"date":1082,"body":1083,"category":693,"tags":1084},[1081],"GitLab","2025-03-13","*Welcome to our \"Getting started with GitLab\" series, where we help\nnewcomers get familiar with the 
GitLab DevSecOps platform.*\n\n\nThis post dives into the\n[`gitlab-triage`](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage)\ngem, a powerful tool that lets you create bots to automate your Agile\nworkflow. Say goodbye to manual tasks and hello to streamlined efficiency.\n\n\n## Why automate your workflow?\n\n\nEfficiency is key in software development. Automating repetitive tasks like\ntriaging issues and merge requests frees up valuable time for your team to\nfocus on what matters most: building amazing software.\n\n\nWith `gitlab-triage`, you can:\n\n\n* **Ensure consistency:** Apply labels and assign issues automatically based\non predefined rules.  \n\n* **Improve response times:** Get immediate feedback on new issues and merge\nrequests.  \n\n* **Reduce manual effort:** Eliminate the need for manual triage and\nupdates.  \n\n* **Boost productivity:** Free up your team to focus on coding and\ninnovation.\n\n\n## Introducing the `gitlab-triage` gem\n\n\nThe `gitlab-triage` gem is a Ruby library that allows you to create bots\nthat interact with your GitLab projects. These bots can automatically\nperform a wide range of actions, including:\n\n\n* **Labeling:** Automatically categorize issues and merge requests.  \n\n* **Commenting:** Provide updates, request information, or give feedback.  \n\n* **Assigning:** Assign issues and merge requests to the appropriate team\nmembers.  \n\n* **Closing:** Close stale or resolved issues and merge requests.  \n\n* **Creating:** Generate new issues based on specific events or\nconditions.  \n\n* **And much more!**\n\n\nCheck out the [`gitlab-triage` gem\nrepository](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage). \n\n\n## Setting up your triage bot\n\n\nLet's get your first triage bot up and running!\n\n\n1. Install the gem. (Note: The gem command is available with Ruby\nprogramming language installed.)\n\n\n```bash\n\ngem install gitlab-triage\n\n```\n\n\n2. 
Get your GitLab API token.\n\n\n* Go to your GitLab [profile\nsettings](https://gitlab.com/-/profile/preferences).  \n\n* Navigate to **Access Tokens**.  \n\n* Create a new token with the `api` scope.  \n\n* **Keep your token secure and set an expiration date for it based on when\nyou will be done with this walkthrough!**\n\n\n3. Define your triage policies.\n\n\nCreate a file named `.triage-policies.yml` in your project's root directory.\nThis file will contain the rules that govern your bot's behavior. Here's a\nsimple example:\n\n\n```yaml\n\n\n---\n\n- name: \"Apply 'WIP' label\"\n  condition:\n    draft: true\n  action:\n    labels:\n      - status::wip\n\n- name: \"Request more information on old issue\"\n  condition:\n   date:\n    attribute: updated_at\n    condition: older_than\n    interval_type: months\n    interval: 12\n  action:\n    comment: |\n      {{author}} This issue has been open for more than 12 months, is this still an issue?\n```\n\n\nThis configuration defines two policies:\n\n\n* The first policy applies the `status::wip` label to any issue that is in\ndraft.  \n\n* The second policy adds a comment to an issue that the issue has not been\nupdated in 12 months.\n\n\n4. 
Run your bot.\n\n\nYou can run your bot manually using the following command:\n\n\n```bash\n\ngitlab-triage -t \u003Cyour_api_token> -p \u003Cyour_project_id>\n\n```\n\n\nReplace `\u003Cyour_api_token>` with your GitLab API token and\n`\u003Cyour_project_id>` with the [ID of your GitLab\nproject](https://docs.gitlab.com/user/project/working_with_projects/#access-a-project-by-using-the-project-id).\nIf you would like to see the impact of actions before they are taken, you\ncan add the `-n` or `--dry-run` to test out the policies first.\n\n\n## Automating with GitLab CI/CD\n\n\nTo automate the execution of your triage bot, integrate it with [GitLab\nCI/CD](https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation/).\nHere's an example `.gitlab-ci.yml` configuration:\n\n\n```yaml\n\n\ntriage:\n  script:\n    - gem install gitlab-triage\n    - gitlab-triage -t $GITLAB_TOKEN -p $CI_PROJECT_ID\n  only:\n    - schedules\n```\n\n\nThis configuration defines a job named \"triage\" that installs the\n`gitlab-triage` gem and runs the bot using the `$GITLAB_TOKEN` (a predefined\n[CI/CD variable](https://docs.gitlab.com/ci/variables/)) and the\n`$CI_PROJECT_ID` variable. The `only: schedules` clause ensures that the job\nruns only on a schedule.\n\n\nTo create a\n[schedule](https://docs.gitlab.com/ee/ci/pipelines/schedules.html), go to\nyour project's **CI/CD** settings and navigate to **Schedules**. Create a\nnew schedule and define the frequency at which you want your bot to run\n(e.g., daily, hourly).\n\n\n## Advanced triage policies\n\n\n`gitlab-triage` offers a range of advanced features for creating more\ncomplex triage policies:\n\n\n* **Regular expressions:** Use regular expressions for more powerful pattern\nmatching.  \n\n* **Summary policies:** Consolidate related issues into a single summary\nissue.  
\n\n* **Custom actions:** Define custom actions using [Ruby code\nblocks](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage#can-i-customize)\nto perform more complex operations using the GitLab API.\n\n\nHere are two advanced real-world examples from the triage bot used by the\nDeveloper Advocacy team at GitLab. You can view the full policies in [this\nfile](https://gitlab.com/gitlab-da/projects/devrel-bot/-/blob/master/.triage-policies.yml?ref_type=heads).\n\n\n```yaml\n\n- name: Issues where DA team member is an assignee outside DA-Meta project\ni.e. DevRel-Influenced\n  conditions:\n    assignee_member:\n      source: group\n      condition: member_of\n      source_id: 1008\n    state: opened\n    ruby: get_project_id != 18 \n    forbidden_labels:\n      - developer-advocacy\n  actions:   \n    labels:\n      - developer-advocacy\n      - DevRel-Influenced\n      - DA-Bot::Skip\n```\n\n\nThis example for issues across a group, excluding those in the project with\nthe ID of 18, have assignees who are members of the group with ID of 1008\nand do not have the label `developer-advocacy` on them. This policy helps\nthe Developer Advocacy team at GitLab to find issues members of the team are\nassigned to but are not in their team’s project. This helps the team\nidentify and keep track of contributions made outside of the team by adding\nthe teams’ labels.\n\n\n```\n\n- name: Missing Due Dates\n  conditions:\n    ruby: missing_due_date\n    state: opened\n    labels:\n      - developer-advocacy\n    forbidden_labels:\n      - DA-Due::N/A\n      - DA-Bot::Skip\n      - DA-Status::FYI\n      - DA-Status::OnHold\n      - CFP\n      - DA-Bot::Triage\n  actions:\n    labels:\n      - DA-Bot-Auto-Due-Date\n    comment: |\n      /due #{get_current_quarter_last_date}\n```\n\n\nThis second example checks for all issues with the `developer-advocacy`\nlabel, which do not include labels in the forbidden labels list and when\ntheir due dates have passed. 
It updates the due dates automatically by\ncommenting on the issue with a slash command and a date that is generated\nusing Ruby.\n\n\nThe Ruby scripts used in the policies are defined in a separate file as\nshown below. This feature allows you to be flexible in working with your\nfilters and actions. You can see functions are created for different Ruby\ncommands that we used in our policies. \n\n\n```\n\nrequire 'json'\n\nrequire 'date'\n\nrequire \"faraday\"\n\nrequire 'dotenv/load'\n\n\nmodule DATriagePlugin\n  def last_comment_at\n    conn = Faraday.new(\n      url: notes_url+\"?sort=desc&order_by=created_at&pagination=keyset&per_page=1\",\n      headers: {'PRIVATE-TOKEN' => ENV.fetch(\"PRIV_KEY\"), 'Content-Type' => 'application/json' }\n    )\n\n    response = conn.get()\n    if response.status == 200\n      jsonData = JSON.parse(response.body)\n      if jsonData.length > 0\n        Date.parse(jsonData[0]['created_at'])\n      else\n        Date.parse(resource[:created_at])\n      end\n    else\n      Date.parse(resource[:created_at])\n    end\n  end\n\n  def notes_url\n    resource[:_links][:notes]\n  end\n\n  def get_project_id\n    resource[:project_id]\n  end\n\n  def get_current_quarter_last_date()\n    yr = Time.now.year\n    case Time.now.month\n    when 2..4\n      lm = 4\n    when 5..7\n      lm = 7\n    when 8..10\n      lm = 10\n    when 11..12\n      lm = 1\n      yr = yr + 1\n    else\n      lm = 1    \n    end\n\n    return Date.new(yr, lm, -1) \n  end\n\n  def one_week_to_due_date\n    if(resource[:due_date] == nil)\n      false\n    else\n      days_to_due = (Date.parse(resource[:due_date]) - Date.today).to_i\n      if(days_to_due > 0 && days_to_due \u003C 7)\n        true\n      else\n        false\n      end\n    end\n  end\n\n  def due_date_past\n    if(resource[:due_date] == nil)\n      false\n    else\n      Date.today > Date.parse(resource[:due_date])\n    end\n  end\n\n  def missing_due_date\n    if(resource[:due_date] == nil)\n      
true\n    else\n      false\n    end\n  end\n\nend\n\n\nGitlab::Triage::Resource::Context.include DATriagePlugin\n\n\n```\n\nThe triage bot is executed using the command:\n\n\n``` \n\n`gitlab-triage -r ./triage_bot/issue_triage_plugin.rb --debug --token\n$PRIV_KEY --source-id gitlab-com --source groups`  \n\n```\n\n\n- `-r`: Passes in a  file of requirements for the performing triage. In this\ncase we are passing in our Ruby functions.  \n\n- `--debug`: Prints debugging information as part of the output.  \n\n- `--token`: Is used to pass in a valid GitLab API token.  \n\n- `--source`: Specifies if the sources of the issues it will search is\nwithin a group or a project.  \n\n- `--source-id`: Takes in the ID of the selected source type – in this case,\na group.\n\n\nThe GitLab [triage-ops](https://gitlab.com/gitlab-org/quality/triage-ops)\nproject is another real-world example that is more complex and you can learn\nhow to build your own triage bot.\n\n\n## Best practices\n\n\n* **Start simple:** Begin with basic policies and gradually increase\ncomplexity as needed. \n\n* **Test thoroughly:** Test your policies in a staging environment before\ndeploying them to production.  \n\n* **Monitor regularly:** Monitor your bot's activity to ensure it's behaving\nas expected. \n\n* **Use descriptive names:** Give your policies clear and descriptive names\nfor easy maintenance. \n\n* **Be mindful of the scope of your filters:** You might be tempted to\nfilter issues across groups where thousands of issues exist. However, this\ncan slow down the triage and also make the process fail due to rate\nlimitations against the GitLab API.  \n\n* **Prioritize using labels for triages:** To avoid spamming other users,\nlabels are a good way to perform triages without cluttering comments and\nissues.\n\n\n## Take control of your workflow\n\n\nWith the `gitlab-triage` gem, you can automate your GitLab workflow and\nunlock new levels of efficiency. 
Start by creating simple triage bots and\ngradually explore the more advanced features. You'll be amazed at how much\ntime and effort you can save\\!\n\n\n> #### Want to take your learning to the next level? [Sign up for GitLab\nUniversity courses](https://university.gitlab.com/). Or you can get going\nright away with a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/).\n\n\n## \"Getting started with GitLab\" series\n\nRead more articles in our \"Getting started with GitLab\" series:\n\n\n- [How to manage\nusers](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n\n- [How to import your projects to\nGitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n\n- [Mastering project\nmanagement](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n\n- [Understanding\nCI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n\n- [Working with CI/CD\nvariables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[495,917,693,999,9],{"slug":1086,"featured":6,"template":700},"automating-agile-workflows-with-the-gitlab-triage-gem","content:en-us:blog:automating-agile-workflows-with-the-gitlab-triage-gem.yml","Automating Agile Workflows With The Gitlab Triage Gem","en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem.yml","en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem",{"_path":1092,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1093,"content":1099,"config":1106,"_id":1108,"_type":14,"title":1109,"_source":16,"_file":1110,"_stem":1111,"_extension":19},"/en-us/blog/automating-boring-git-operations-gitlab-ci",{"title":1094,"description":1095,"ogTitle":1094,"ogDescription":1095,"noIndex":6,"ogImage":1096,"ogUrl":1097,"ogSiteName":685,"ogType":686,"canonicalUrls":1097,"schema":1098},"GitBot – automating boring Git operations with 
CI","Guest author Kristian Larsson shares how he automates some common Git operations, like rebase, using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672374/Blog/Hero%20Images/gitbot-automate-git-operations.jpg","https://about.gitlab.com/blog/automating-boring-git-operations-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitBot – automating boring Git operations with CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristian Larsson\"}],\n        \"datePublished\": \"2017-11-02\",\n      }",{"title":1094,"description":1095,"authors":1100,"heroImage":1096,"date":1102,"body":1103,"category":718,"tags":1104},[1101],"Kristian Larsson","2017-11-02","Git is super useful for anyone doing a bit of development work or just\ntrying to\n\nkeep track of a bunch of text files. However, as your project grows you\nmight\n\nfind yourself doing lots of boring repetitive work just around Git itself.\nAt\n\nleast that’s what happened to me and so I automated some boring Git stuff\nusing our\n\n[continuous integration (CI) system](/solutions/continuous-integration/).\n\n\n\u003C!-- more -->\n\n\nThere are probably all sorts of use cases for automating various Git\noperations\n\nbut I’ll talk about a few that I’ve encountered. We’re using GitLab and\n[GitLab\n\nCI](/solutions/continuous-integration/) so that’s what my examples\n\nwill include, but most of the concepts should apply to other systems as\nwell.\n\n\n## Automatic rebase\n\n\nWe have some Git repos with source code that we receive from vendors, who we\ncan think\n\nof as our `upstream`. We don’t actually share a Git repo with the vendor but\n\nrather we get a tar ball every now and then. The tar ball is extracted into\na\n\nGit repository, on the `master` branch which thus tracks the software as it\nis\n\nreceived from upstream. 
In a perfect world the software we receive would be\n\nfeature complete and bug free and so we would be done, but that’s usually\nnot\n\nthe case. We do find bugs and if they are blocking we might decide to\nimplement\n\na patch to fix them ourselves. The same is true for new features where we\nmight\n\nnot want to wait for the vendor to implement it.\n\n\nThe result is that we have some local patches to apply. We commit such\npatches\n\nto a separate branch, commonly named `ts` (for TeraStream), to keep them\n\nseparate from the official software. Whenever a new software version is\nreleased,\n\nwe extract its content to `master` and then rebase our `ts` branch onto\n`master`\n\nso we get all the new official features together with our patches. Once\nwe’ve\n\nimplemented something we usually send it upstream to the vendor for\ninclusion.\n\nSometimes they include our patches verbatim so that the next version of the\ncode\n\nwill include our exact patch, in which case a rebase will simply skip our\npatch.\n\nOther times there are slight or major (it might be a completely different\ndesign)\n\nchanges to the patch and then someone typically needs to sort out the\npatches\n\nmanually. Mostly though, rebasing works just fine and we don’t end up with\nconflicts.\n\n\nNow, this whole rebasing process gets a tad boring and repetitive after a\nwhile,\n\nespecially considering we have a dozen of repositories with the setup\ndescribed\n\nabove. 
What I recently did was to automate this using our CI system.\n\n\nThe workflow thus looks like:\n\n\n- human extracts zip file, git add + git commit on master + git push\n\n- CI runs for `master` branch\n   - clones a copy of itself into a new working directory\n   - checks out `ts` branch (the one with our patches) in working directory\n   - rebases `ts` onto `master`\n   - push `ts` back to `origin`\n- this event will now trigger a CI build for the `ts` branch\n\n- when CI runs for the `ts` branch, it will compile, test and save the\nbinary output as “build artifacts”, which can be included in other\nrepositories\n\n- GitLab CI, which is what we use, has a CI_PIPELINE_ID that we use to\nversion built container images or artifacts\n\n\nTo do this, all you need is a few lines in a .gitlab-ci.yml file,\nessentially;\n\n\n```\n\nstages:\n  - build\n  - git-robot\n\n... build jobs ...\n\n\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n  ```\n\nWe’ll go through the Yaml file a few lines at a time. 
Some basic knowledge\nabout GitLab CI is assumed.\n\n\nThis first part lists the stages of our pipeline.\n\n\n```\n\nstages:\n  - build\n  - git-robot\n  ```\n\nWe have two stages, first the `build` stage, which does whatever you want it\nto\n\ndo (ours compiles stuff, runs a few unit tests and packages it all up), then\nthe\n\n`git-robot` stage which is where we perform the rebase.\n\n\nThen there’s:\n\n\n```\n\ngit-rebase-ts:\n  stage: git-robot\n  only:\n    - master\n  allow_failure: true\n  ```\n\nWe define the stage in which we run followed by the only statement which\nlimits\n\nCI jobs to run only on the specified branch(es), in this case `master`.\n\n\n`allow_failure` simply allows the CI job to fail but still passing the\npipeline.\n\n\nSince we are going to clone a copy of ourselves (the repository checked out\nin\n\nCI) we need SSH and SSH keys set up. We’ll use ssh-agent with a\npassword-less key\n\nto authenticate. Generate a key using ssh-keygen, for example:\n\n\n```\n\nssh-keygen\n\n\nkll@machine ~ $ ssh-keygen -f foo\n\nGenerating public/private rsa key pair.\n\nEnter passphrase (empty for no passphrase):\n\nEnter same passphrase again:\n\nYour identification has been saved in foo.\n\nYour public key has been saved in foo.pub.\n\nThe key fingerprint is:\n\nSHA256:6s15MZJ1/kUsDU/PF2WwRGA963m6ZSwHvEJJdsRzmaA kll@machine\n\nThe key's randomart image is:\n\n+---[RSA 2048]----+\n\n|            o**.*|\n\n|           ..o**o|\n\n|           Eo o%o|\n\n|          .o.+o O|\n\n|        So oo.o+.|\n\n|       .o o.. o+o|\n\n|      .  . o..o+=|\n\n|     . o ..  .o= |\n\n|      . +.    .. |\n\n+----[SHA256]-----+\n\nkll@machine ~ $\n\n```\n\n\nAdd the public key as a deploy key under Project Settings\n\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Repository \u003Ci\nclass=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\n\nDeploy Keys. Make sure you enable write access or you won’t be able to have\nyour\n\nGit robot push commits. 
We then need to hand over the private key so that it\ncan\n\nbe accessed from within the CI job. We’ll use a secret environment variable\nfor\n\nthat, which you can define under Project Settings\n\n\u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> Pipelines \u003Ci\nclass=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i>\n\nEnvironment variables). I’ll use the environment variable GIT_SSH_PRIV_KEY\nfor this.\n\n\nNext part is the before_script:\n\n\n```\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  ```\n\nFirst ssh-agent is installed if it isn’t already. We then start up ssh-agent\nand\n\nadd the key stored in the environment variable GIT_SSH_PRIV_KEY (which we\nset up\n\npreviously). The Git user information is set and we finally create .ssh and\nadd\n\nthe known host information about our GitLab server to our known_hosts file.\nYou\n\ncan generate the gitlab-known-hosts file using the following command:\n\n\n```\n\nssh-keyscan my-gitlab-machine >> gitlab-known-hosts\n\n```\n\n\nAs the name implies, the before_script is run before the main `script` part\nand\n\nthe ssh-agent we started in the before_script will also continue to run for\nthe\n\nduration of the job. The ssh-agent information is stored in some environment\n\nvariables which are carried across from the before_script into the main\nscript,\n\nenabling it to work. 
It’s also possible to put this SSH setup in the main\nscript,\n\nI just thought it looked cleaner splitting it up between before_script and\nscript.\n\nNote however that it appears that after_script behaves differently so while\nit’s\n\npossible to pass environment vars from before_script to script, they do not\n\nappear to be passed to after_script. Thus, if you want to do Git magic in\nthe\n\nafter_script you also need to perform the SSH setup in the after_script.\n\n\nThis brings us to the main script. In GitLab CI we already have a\nchecked-out\n\nclone of our project but that was automatically checked out by the CI system\n\nthrough the use of magic (it actually happens in a container previous to the\none\n\nwe are operating in, that has some special credentials) so we can’t really\nuse\n\nit, besides, checking out other branches and stuff would be really weird as\nit\n\ndisrupts the code we are using to do this, since that’s available in the Git\n\nrepository that’s checked out. It’s all rather meta.\n\n\nAnyway, we’ll be checking out a new Git repository where we’ll do our work,\nthen\n\nchange the current directory to the newly checked-out repository, after\nwhich\n\nwe’ll check out the `ts` branch, do the rebase and push it back to the\norigin remote.\n\n\n```\n    - git clone git@gitlab.dev.terastrm.net:${CI_PROJECT_PATH}.git\n    - cd ${CI_PROJECT_NAME}\n    - git checkout ts\n    - git rebase master\n    - git push --force origin ts\n  ```\n\n… and that’s it. We’ve now automated the rebasing of a branch in our config\nfile. 
Occasionally it\n\nwill fail due to problems rebasing (most commonly merge conflicts) but then\nyou\n\ncan just step in and do the above steps manually and be interactively\nprompted\n\non how to handle conflicts.\n\n\n## Automatic merge requests\n\n\nAll the repositories I mentioned in the previous section are NEDs, a form of\n\ndriver for how to communicate with a certain type of device, for Cisco NSO\n(a\n\nnetwork orchestration system). We package up Cisco NSO, together with these\nNEDs\n\nand our own service code, in a container image. The build of that image is\n\nperformed in CI and we use a repository called `nso-ts` to control that\nwork.\n\n\nThe NEDs are compiled in CI from their own repository and the binaries are\nsaved\n\nas build artifacts. Those artifacts can then be pulled in the CI build of\n`nso-ts`.\n\nThe reference to which artifact to include is the name of the NED as well as\nthe\n\nbuild version. The version number of the NED is nothing more than the\npipeline\n\nid (which you’ll access in CI as ${CI_PIPELINE_ID}) and by including a\nspecific\n\nversion of the NED, rather than just use “latest” we gain a much more\nconsistent\n\nand reproducible build.\n\n\nWhenever a NED is updated a new build is run that produces new binary\nartifacts.\n\nWe probably want to use the new version but not before we test it out in CI.\nThe\n\nactual versions of NEDs to use is stored in a file in the `nso-ts`\nrepository and\n\nfollows a simple format, like this:\n\n\n```\n\nned-iosxr-yang=1234\n\nned-junos-yang=4567\n\n...\n\n```\n\n\nThus, updating the version to use is a simple job to just rewrite this text\nfile\n\nand replace the version number with a given CI_PIPELINE_ID version number.\nAgain,\n\nwhile NED updates are more seldom than updates to `nso-ts`, they do occur\nand\n\nhandling it is bloody boring. 
Enter automation!\n\n\n```\n\ngit-open-mr:\n  image: gitlab.dev.terastrm.net:4567/terastream/cisco-nso/ci-cisco-nso:4.2.3\n  stage: git-robot\n  only:\n    - ts\n  tags:\n    - no-docker\n  allow_failure: true\n  before_script:\n    - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'\n    - eval $(ssh-agent -s)\n    - ssh-add \u003C(echo \"$GIT_SSH_PRIV_KEY\")\n    - git config --global user.email \"kll@dev.terastrm.net\"\n    - git config --global user.name \"Mr. Robot\"\n    - mkdir -p ~/.ssh\n    - cat gitlab-known-hosts >> ~/.ssh/known_hosts\n  script:\n    - git clone git@gitlab.dev.terastrm.net:TeraStream/nso-ts.git\n    - cd nso-ts\n    - git checkout -b robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - for LIST_FILE in $(ls ../ned-package-list.* | xargs -n1 basename); do NED_BUILD=$(cat ../${LIST_FILE}); sed -i packages/${LIST_FILE} -e \"s/^${CI_PROJECT_NAME}.*/${CI_PROJECT_NAME}=${NED_BUILD}/\"; done\n    - git diff\n    - git commit -a -m \"Use ${CI_PROJECT_NAME} artifacts from pipeline ${CI_PIPELINE_ID}\"\n    - git push origin robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID}\n    - HOST=${CI_PROJECT_URL} CI_COMMIT_REF_NAME=robot-update-${CI_PROJECT_NAME}-${CI_PIPELINE_ID} CI_PROJECT_NAME=TeraStream/nso-ts GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ../open-mr.sh\n```\n\n\nSo this time around we check out a Git repository into a separate working\n\ndirectory again, it’s just that it’s not the same Git repository as we are\n\nrunning on simply because we are trying to do changes to a repository that\nis\n\nusing the output of the repository we are running on. It doesn’t make much\nof a\n\ndifference in terms of our process. 
At the end, once we’ve modified the\nfiles we\n\nare interested in, we also open up a merge request on the target repository.\n\nHere we can see the MR (which is merged already) to use a new version of the\n\nNED `ned-snabbaftr-yang`.\n\n\n\u003Cimg src=\"/images/blogimages/gitbot-ned-update-mr.png\" alt=\"MR using new\nversion of NED\" style=\"width: 700px;\"/>{: .shadow}\n\n\nWhat we end up with is that whenever there is a new version of a NED, a\nsingle merge\n\nrequest is opened on our `nso-ts` repository to start using the new NED.\nThat\n\nmerge request is using changes on a new branch and CI will obviously run for\n\n`nso-ts` on this new branch, which will then test all of our code using the\nnew\n\nversion of the NED. We get a form of version pinning, with the form of\nexplicit\n\nchanges that it entails, yet it’s a rather convenient and non-cumbersome\n\nenvironment to work with thanks to all the automation.\n\n\n## Getting fancy\n\n\nWhile automatically opening an MR is sweet… we can do ~~better~~fancier. Our\n`nso-ts`\n\nrepository is based on Cisco NSO (Tail-F NCS), or actually the `nso-ts`\nDocker\n\nimage is based on a `cisco-nso` Docker image that we build in a separate\n\nrepository. We put the version of NSO as the tag of the `cisco-nso` Docker\n\nimage, so `cisco-nso:4.2.3` means Cisco NSO 4.2.3. This is what the `nso-ts`\n\nDockerfile will use in its `FROM` line.\n\n\nUpgrading to a new version of NCS is thus just a matter of rewriting the\ntag…\n\nbut what version of NCS should we use? There’s 4.2.4, 4.3.3, 4.4.2 and 4.4.3\n\navailable and I’m sure there’s some other version that will pop up its evil\n\nhead soon enough. How do I know which version to pick? 
And will our current\ncode\n\nwork with the new version?\n\n\nTo help myself in the choice of NCS version I implemented a script that gets\nthe\n\nREADME file of a new NCS version and cross references the list of fixed\nissues\n\nwith the issues that we currently have open in the Tail-F issue tracker. The\n\noutput of this is included in the merge request description so when I look\nat\n\nthe merge request I immediately know what bugs are fixed or new features are\n\nimplemented by moving to a specific version. Having this automatically\ngenerated\n\nfor us is… well, it’s just damn convenient. Together with actually testing\nour\n\ncode with the new version of NCS gives us confidence that an upgrade will be\nsmooth.\n\n\nHere are the merge requests currently opened by our GitBot:\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-merge-requests.png\" alt=\"Merge\nrequests automated by Git bot\" style=\"width: 700px;\"/>{: .shadow}\n\n\nWe can see how the system have generated MRs to move to all the different\n\nversions of NSO currently available. As we are currently on NSO v4.2.3\nthere’s\n\nno underlying branch for that one leading to an errored build. 
For the other\n\nversions though, there is a branch per version that executes the CI pipeline\nto\n\nmake sure all our code runs with this version of NSO.\n\n\nAs there have been a few commits today, these branches are behind by six\ncommits\n\nbut will be rebased this night so we get an up-to-date picture if they work\nor\n\nnot with our latest code.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-commits.png\" alt=\"Commits\"\nstyle=\"width: 700px;\"/>{: .shadow}\n\n\nIf we go back and look at one of these merge requests, we can see how the\n\ndescription includes information about what issues that we currently have\nopen\n\nwith Cisco / Tail-F would be solved by moving to this version.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description.png\" alt=\"Merge\nrequest descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\n\nThis is from v4.2.4 and as we are currently on v4.2.3 we can see that there\nare\n\nonly a few fixed issues.\n\n\nIf we instead look at v4.4.3 we can see that the list is significantly\nlonger.\n\n\n\u003Cimg src=\"/images/blogimages/automate-git-mr-description-list.png\"\nalt=\"Merge request descriptions\" style=\"width: 700px;\"/>{: .shadow}\n\n\nPretty sweet, huh? :)\n\n\nAs this involves a bit more code I’ve put the relevant files in a [GitHub\ngist](https://gist.github.com/plajjan/42592665afd5ae045ee36220e19919aa).\n\n\n## This is the end\n\n\nIf you are reading this, chances are you already have your reasons for why\nyou\n\nwant to automate some Git operations. 
Hopefully I’ve provided some\ninspiration\n\nfor how to do it.\n\n\nIf not or if you just want to discuss the topic in general or have more\nspecific\n\nquestions about our setup, please do reach out to me on\n[Twitter](https://twitter.com/plajjan).\n\n\n_[This post](http://plajjan.github.io/automating-git/) was originally\npublished on [plajjan.github.io](http://plajjan.github.io/)._\n\n\n## About the Guest Author\n\n\nKristian Larsson is a network automation systems architect at Deutsche\nTelekom.\n\nHe is working on automating virtually all aspects of running TeraStream, the\n\ndesign for Deutsche Telekom's next generation fixed network, using robust\nand\n\nfault tolerant software. He is active in the IETF as well as being a\n\nrepresenting member in OpenConfig. Previous to joining Deutsche Telekom,\n\nKristian was the IP & opto network architect for Tele2's international\nbackbone\n\nnetwork.\n\n\n\"[BB-8 in action](https://unsplash.com/photos/C8VWyZhcIIU) by [Joseph\nChan](https://unsplash.com/@yulokchan) on Unsplash\n\n{: .note}\n",[9,763,1105],"git",{"slug":1107,"featured":6,"template":700},"automating-boring-git-operations-gitlab-ci","content:en-us:blog:automating-boring-git-operations-gitlab-ci.yml","Automating Boring Git Operations Gitlab Ci","en-us/blog/automating-boring-git-operations-gitlab-ci.yml","en-us/blog/automating-boring-git-operations-gitlab-ci",{"_path":1113,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1114,"content":1120,"config":1128,"_id":1130,"_type":14,"title":1131,"_source":16,"_file":1132,"_stem":1133,"_extension":19},"/en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"title":1115,"description":1116,"ogTitle":1115,"ogDescription":1116,"noIndex":6,"ogImage":1117,"ogUrl":1118,"ogSiteName":685,"ogType":686,"canonicalUrls":1118,"schema":1119},"Automating container image migration from Amazon ECR to GitLab","When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the 
bottleneck. Follow this step-by-step guide to automate the pipeline migration process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663129/Blog/Hero%20Images/blog-image-template-1800x945__28_.png","https://about.gitlab.com/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating container image migration from Amazon ECR to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-13\",\n      }",{"title":1115,"description":1116,"authors":1121,"heroImage":1117,"date":1123,"body":1124,"category":718,"tags":1125},[1122],"Tim Rizzi","2025-02-13","\"We need to migrate hundreds of container images from Amazon Elastic\nContainer Registry (ECR) to GitLab. Can you help?\" This question kept coming\nup in conversations with platform engineers. They were modernizing their\nDevSecOps toolchain with GitLab but got stuck when faced with moving their\ncontainer images. While each image transfer is simple, the sheer volume made\nit daunting.\n\n\nOne platform engineer perfectly said, \"I know exactly what needs to be done\n– pull, retag, push. But I have 200 microservices, each with multiple tags.\nI can't justify spending weeks on this migration when I have critical\ninfrastructure work.\"\n\n\n## The challenge\n\n\nThat conversation sparked an idea. What if we could automate the entire\nprocess? When platform teams move their\n[CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating\ncontainer images shouldn't be the bottleneck. The manual process is\nstraightforward but repetitive – pull each image, retag it, and push it to\nGitLab's Container Registry. 
Multiply this by dozens of repositories and\nmultiple tags per image, and you're looking at days or weeks of tedious\nwork.\n\n\n## The solution\n\n\nWe set out to create a GitLab pipeline that would automatically do all this\nheavy lifting. The goal was simple: Give platform engineers a tool they\ncould set up in minutes and let run overnight, waking up to find all their\nimages migrated successfully.\n\n\n### Setting up access\n\n\nFirst things first – security. We wanted to ensure teams could run this\nmigration with minimal AWS permissions. Here's the read-only identity and\naccess management (IAM) policy you'll need:\n\n\n```json\n\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"ecr:GetAuthorizationToken\",\n                \"ecr:BatchCheckLayerAvailability\",\n                \"ecr:GetDownloadUrlForLayer\",\n                \"ecr:DescribeRepositories\",\n                \"ecr:ListImages\",\n                \"ecr:DescribeImages\",\n                \"ecr:BatchGetImage\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n\n```\n\n\n### GitLab configuration\n\n\nWith security handled, the next step is setting up GitLab. We kept this\nminimal - you'll need to configure these variables in your CI/CD settings:\n\n\n```\n\nAWS_ACCOUNT_ID: Your AWS account number\n\nAWS_DEFAULT_REGION: Your ECR region\n\nAWS_ACCESS_KEY_ID: [Masked]\n\nAWS_SECRET_ACCESS_KEY: [Masked]\n\nBULK_MIGRATE: true\n\n```\n\n\n### The migration pipeline\n\n\nNow for the interesting part. 
We built the pipeline using Docker-in-Docker\nto handle all the image operations reliably:\n\n\n```yaml\n\nimage: docker:20.10\n\nservices:\n  - docker:20.10-dind\n\nbefore_script:\n  - apk add --no-cache aws-cli jq\n  - aws sts get-caller-identity\n  - aws ecr get-login-password | docker login --username AWS --password-stdin\n  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}\n```\n\n\nThe pipeline works in three phases, each building on the last:\n\n\n1. Discovery\n\n\nFirst, it finds all your repositories:\n\n\n```bash\n\nREPOS=$(aws ecr describe-repositories --query\n'repositories[*].repositoryName' --output text)\n\n```\n\n\n2. Tag enumeration\n\n\nThen, for each repository, it gets all the tags:\n\n\n```bash\n\nTAGS=$(aws ecr describe-images --repository-name $repo --query\n'imageDetails[*].imageTags[]' --output text)\n\n```\n\n\n3. Transfer\n\n\nFinally, it handles the actual migration:\n\n\n```bash\n\ndocker pull\n${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\n\ndocker tag\n${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\n${CI_REGISTRY_IMAGE}/${repo}:${tag}\n\ndocker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}\n\n```\n\n\n## What you get\n\n\nRemember that platform engineer who didn't want to spend weeks on migration?\nHere's what this solution delivers:\n\n\n- automated discovery and migration of all repositories and tags\n\n- consistent image naming between ECR and GitLab\n\n- error handling for failed transfers\n\n- clear logging for tracking progress\n\n\nInstead of writing scripts and babysitting the migration, the platform\nengineer could focus on more valuable work.\n\n\n## Usage\n\n\nGetting started is straightforward:\n\n\n1. Copy the `.gitlab-ci.yml` to your repository.\n\n2. Configure the AWS and GitLab variables.\n\n3. 
Set `BULK_MIGRATE` to \"true\" to start the migration.\n\n\n## Best practices\n\n\nThrough helping teams with their migrations, we've learned a few things:\n\n\n- Run during off-peak hours to minimize the impact on your team.\n\n- Keep an eye on the pipeline logs - they'll tell you if anything needs\nattention.\n\n- Don't decommission ECR until you've verified all images transferred\nsuccessfully.\n\n- For very large migrations, consider adding rate limiting to avoid\noverwhelming your network\n\n\nWe've open-sourced this pipeline in our public GitLab repository because we\nbelieve platform engineers should spend time building valuable\ninfrastructure, not copying container images. Feel free to adapt it for your\nneeds or ask questions about implementation.\n\n\n> #### Get started with this and other package components with our [CI/CD\nCatalog\ndocumentation](https://gitlab.com/explore/catalog/components/package).\n",[9,1126,917,495,693,1127],"AWS","solutions architecture",{"slug":1129,"featured":91,"template":700},"automating-container-image-migration-from-amazon-ecr-to-gitlab","content:en-us:blog:automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","Automating Container Image Migration From Amazon Ecr To Gitlab","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"_path":1135,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1136,"content":1142,"config":1148,"_id":1150,"_type":14,"title":1151,"_source":16,"_file":1152,"_stem":1153,"_extension":19},"/en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd",{"title":1137,"description":1138,"ogTitle":1137,"ogDescription":1138,"noIndex":6,"ogImage":1139,"ogUrl":1140,"ogSiteName":685,"ogType":686,"canonicalUrls":1140,"schema":1141},"Automating cybersecurity threat detections with GitLab CI/CD","Discover how GUARD automates cybersecurity threat detections through the use\nof GitLab 
CI/CD and how it ensures high-quality detections.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663239/Blog/Hero%20Images/AdobeStock_1023776629.jpg","https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating cybersecurity threat detections with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mitra Jozenazemian\"}],\n        \"datePublished\": \"2025-01-29\",\n      }",{"title":1137,"description":1138,"authors":1143,"heroImage":1139,"date":1145,"body":1146,"category":697,"tags":1147},[1144],"Mitra Jozenazemian","2025-01-29","*This blog post is the second post in a series about [GitLab Universal\nAutomated Response and Detection\n(GUARD)](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/).*\n\n\nWriting and deploying security threat detections in an organization’s\nsecurity information event management platform (SIEM) is a critical\ncomponent of a successful cybersecurity program. Moving from manual\ndetection engineering to a fully automated process by implementing\nDetections as Code (DaC) ensures detection consistency, quality, auditing,\nand automated testing. At GitLab, we’ve embedded DaC capabilities into\nGUARD, our fully automated detection and response framework. \n\n\n## The problem: Source control and automated tests\n\n\nThe [Signals\nEngineering](https://handbook.gitlab.com/handbook/security/security-operations/signals-engineering/)\nand\n[SIRT](https://handbook.gitlab.com/handbook/security/security-operations/sirt/)\nteam at GitLab share the responsibility to create, update, and decommission\nthreat detections in our SIEM. Maintaining a single source of truth for\ndetections is critical to ensure detection consistency and quality standards\nare met. 
Our teams made the conscious decision to abstract the detection\ncreation process from our SIEM, improving our issue tracking, consistency,\nroll-back process, and metrics. Additionally, conducting pre-commit\ndetection tests outside of our SIEM ensured that newly created detections\ndidn’t introduce overly false positive heavy alerts, which would require\ntuning or disablement while the alert was fixed. \n\n\n## The Solution: Leverage GitLab CI/CD for detection testing and validation\n\n\nTo address these challenges, we developed an efficient workflow using GitLab\n[CI/CD](https://about.gitlab.com/topics/ci-cd/), resulting in a streamlined\nand secure SIEM detection deployment process.\n\n\n### Key components of the GUARD DaC pipeline \n\n\n__1. Detections stored in JSON format in a GitLab project__\n\n\nGitLab uses the JSON format for our threat detections. The template includes\nessential information such as SIEM query logic, detection title, and\ndescription along with runbook page link, MITRE tactic and technique related\nto the detection, and other necessary details.\n\n\n__2. Initiating merge requests__\n\n\nWhen a GitLab team member intends to create a new threat detection, update\nan existing one, or delete a current detection, they initiate the process by\nsubmitting a merge request (MR) in the DaC project containing the detection\nJSON template. Creating the MR automatically triggers a CI/CD pipeline.\n\n\n__3. 
Automated validation with CI/CD jobs__\n\n\nEach MR contains a number of automated checks via GitLab CI/CD:   \n\n* Query format validation queries SIEM API to ensure detection query is\nvalid  \n\n* JSON Detection fields validation validates all required fields are\npresent, and are in the correct format   \n\n* New detections and detection modification trigger a number of SIEM API\ncalls to ensure the detection does not have any errors and that no issues\nwill be introduced into our production detection rules   \n\n* Detection deletion MRs trigger the pipeline to issue a SIEM API query to\nensure the detection to be deleted is still active and can be deleted \n\n\n__4. Peer review and approval__\n\n\nWhen a detection MR job completes successfully, a peer review is required to\nreview and confirm the MR meets required quality and content standards\nbefore the detection MR can be merged. [Merge request approval\nrules](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html)\nare used to trigger the peer review process. \n\n\n__5. Merge and final deployment__\n\n\nAfter the MR is approved, it is merged into the main branch. As part of the\nCI/CD pipeline, an automated job executes a SIEM API command in order to\nperform two tasks:   \n\n* Create the new detection or update/delete the existing detection if\nneeded.   \n\n* Extract the MITRE ATT&CK tactic and technique information related to the\nalert from the JSON files and transmit these details to a lookup table\nwithin the SIEM. 
This lookup table plays an important role in mapping our\nalerts to MITRE tactics and techniques, helping us improve our threat\nanalysis and identify gaps in our detection capabilities in alignment with\nthe MITRE framework.\n\n\n**Note:** The necessary credentials for these actions are securely stored in\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) to ensure the\nprocess remains confidential and secure.\n\n\nBelow is a template GitLab CI/CD `gitlab-ci.yml` configuration file for a\nDaC pipeline: \n\n\n```\n\n\n#\n---------------------------------------------------------------------------\n#\n\n# GitLab CI/CD Pipeline for SIEM Detection Management\n\n#\n---------------------------------------------------------------------------\n#\n\n\nimage: python:3.12\n\n\n#\n---------------------------------------------------------------------------\n#\n\n# Global Configuration\n\n#\n---------------------------------------------------------------------------\n#\n\n\nbefore_script:\n  - apt-get update && apt-get install -y jq\n  - pip install --upgrade pip\n  - pip install -r requirements.txt\n\n#\n---------------------------------------------------------------------------\n#\n\n\nstages:\n  - fetch\n  - test\n  - process\n  - upload\n\n#\n---------------------------------------------------------------------------\n#\n\n# Fetch Stage\n\n#\n---------------------------------------------------------------------------\n#\n\n\nfetch_changed_files:\n  stage: fetch\n  Script:\n    - echo \"Fetching changed files...\"\n    - git branch\n    - git fetch origin $CI_DEFAULT_BRANCH:$CI_DEFAULT_BRANCH --depth 2000\n    - |\n      if [[ \"$CI_COMMIT_BRANCH\" == \"$CI_DEFAULT_BRANCH\" ]]; then\n        git diff --name-status HEAD^1...HEAD > changed-files-temp.txt\n      else\n        git fetch origin $CI_COMMIT_BRANCH:$CI_COMMIT_BRANCH --depth 2000\n        git diff --name-status ${CI_DEFAULT_BRANCH}...${CI_COMMIT_SHA} > changed-files-temp.txt\n      fi\n    - grep -E 
'\\.json$' changed-files-temp.txt > changed-files.txt || true\n    - flake8 .\n    - pytest\n  artifacts:\n    paths:\n      - changed-files.txt\n    expose_as: 'changed_files'\n\n#\n---------------------------------------------------------------------------\n#\n\n# Test Stage\n\n#\n---------------------------------------------------------------------------\n#\n\n\nflake8:\n  stage: test\n  script:\n    - echo \"Running Flake8 for linting...\"\n    - flake8 .\n\npytest:\n  stage: test\n  script:\n    - echo \"Running Pytest for unit tests...\"\n    - pytest\n  artifacts:\n    when: always\n    reports:\n      junit: report.xml\n\n#\n---------------------------------------------------------------------------\n#\n\n# Process Stage\n\n#\n---------------------------------------------------------------------------\n#\n\n\nprocess_files:\n  stage: process\n  script:\n    - echo \"Processing changed files...\"\n    - git clone --depth 2000 --branch $CI_DEFAULT_BRANCH $CI_REPOSITORY_URL\n    - mkdir -p modified_rules delete_file new_file\n    - python3 move-files.py -x changed-files.txt\n    - python3 check-alerts-format.py\n  artifacts:\n    paths:\n      - modified_rules\n      - delete_file\n      - new_file\n#\n---------------------------------------------------------------------------\n#\n\n# Upload Stage\n\n#\n---------------------------------------------------------------------------\n#\n\n\nupdate_rules:\n  stage: upload\n  script:\n    - echo \"Uploading updated rules and lookup tables...\"\n    - git fetch origin $CI_DEFAULT_BRANCH:$CI_DEFAULT_BRANCH --depth 2000\n    - git clone --depth 2000 --branch $CI_DEFAULT_BRANCH $CI_REPOSITORY_URL \n    - python3 update-rules.py\n    - python3 update-exceptions.py\n    - python3 create_ttps_layers.py\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_PIPELINE_SOURCE != \"schedule\"\n      changes:\n        - detections/**/*\n        - exceptions/**/*\n```\n\n\nThe diagram below illustrates the workflow of the CI/CD 
process described\nabove.\n\n\n```mermaid\n\ngraph TD;\n    fetch[Fetch Stage: Identify Changed Files] --> test[Test Stage: Run Linting and Tests];\n    test --> process[Process Stage: Categorize Files];\n    process --> upload[Upload Stage: Update Rules and Lookup Tables];\n    fetch --> fetch_details[Details: Filter JSON files, Output 'changed-files.txt'];\n    test --> test_details[Details: Run Flake8 for linting, Pytest for testing];\n    process --> process_details[Details: Categorize into 'modified', 'new', 'deleted', Prepare for upload];\n    upload --> upload_details[Details: Update repo, Update detections in SIEM and SIEM lookup table];\n```\n\n\n## Benefits and outcomes\n\n\nAutomating our detections lifecycle through a DaC CI/CD-powered workflow\nintroduces numerous benefits to our threat detection deployment process:\n\n\n* Automation: Automating the creation and validation of SIEM detections\nreduces manual errors and saves time.\n\n* Enhanced security: The CI-driven workflow enforces a \"least privilege\"\npolicy, ensuring consistency, peer reviews, and quality standards for\ncreating, updating, or deleting threat detections. \n\n* Efficiency: The standardized JSON detection format and automated creation\nexpedite the deployment process.\n\n* Collaboration: The MR and review process fosters collaboration and\nknowledge sharing among GitLab team members.\n\n* Version control: Treating threat detection as code abstracts the\ndetections from the SIEM platform they are ultimately stored in. This\nabstraction provides a historical record of changes, facilitates\ncollaboration, and enables rollbacks to previous configurations if issues\narise.\n\n\n## Get started with DaC\n\n\nUsing GitLab CI/CD and a \"least privilege\" policy has made our SIEM\ndetection and alert management easier and more secure. Automation has\nimproved efficiency and reduced risks, providing a helpful example for\nothers wanting to improve their security and compliance. 
You can try this\ntutorial by signing up for a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/).",[697,917,696,495,9],{"slug":1149,"featured":6,"template":700},"automating-cybersecurity-threat-detections-with-gitlab-ci-cd","content:en-us:blog:automating-cybersecurity-threat-detections-with-gitlab-ci-cd.yml","Automating Cybersecurity Threat Detections With Gitlab Ci Cd","en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd.yml","en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd",{"_path":1155,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1156,"content":1162,"config":1168,"_id":1170,"_type":14,"title":1171,"_source":16,"_file":1172,"_stem":1173,"_extension":19},"/en-us/blog/autoscale-ci-runners",{"title":1157,"description":1158,"ogTitle":1157,"ogDescription":1158,"noIndex":6,"ogImage":1159,"ogUrl":1160,"ogSiteName":685,"ogType":686,"canonicalUrls":1160,"schema":1161},"Autoscale GitLab CI/CD runners and save 90% on EC2 costs","Guest author Max Woolf shows how his team makes big savings with an autoscaling cluster of GitLab CI/CD runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680305/Blog/Hero%20Images/autoscale-gitlab-ci-runners.jpg","https://about.gitlab.com/blog/autoscale-ci-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Autoscale GitLab CI/CD runners and save 90% on EC2 costs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Max Woolf\"}],\n        \"datePublished\": \"2017-11-23\",\n      }",{"title":1157,"description":1158,"authors":1163,"heroImage":1159,"date":1165,"body":1166,"category":718,"tags":1167},[1164],"Max Woolf","2017-11-23","At [Substrakt Health](https://substrakthealth.com/), we use continuous\nintegration workers to test our software every time new code is written and\npushed, but that computing capacity can be expensive and hard to predict.\nThis 
tutorial shows you how to set up an autoscaling [cluster of GitLab\nCI/CD](/topics/ci-cd/) runners using docker-machine and AWS.\n\n\n\u003C!-- more -->\n\n\nCode quality is **always** a top priority for us. We want to know that our\ncode works every time and when it stops working we want to know immediately.\nWe use [GitLab CI/CD](/solutions/continuous-integration/) to run our tests\nevery time we push new code and before every deployment. GitLab CI/CD lets\nus split this work across multiple servers and scale up and down capacity as\nrequired to keep costs down for us. This tutorial will show you how to set\nup an autoscaling CI/CD cluster for GitLab and save up to 90 percent on\ncosts using AWS EC2 Spot Instances.\n\n\nGitLab CI/CD allows us to split our jobs across multiple machines. By\ndefault, each new worker node requires some setup work to provision and\nattach it to our GitLab instance, but we can also use the autoscaling mode\nto provision a single machine and let that machine decide how much capacity\nis required and then spin up or down further instances as required.\n\n\n>**A warning**: This tutorial will not be covered entirely by the AWS free\nusage tier. It’s going to cost money to try this out.\n\n\n## Creating the spawner\n\n\nFirst off, we need a spawner machine. This runs 24/7 and checks that GitLab\nCI/CD has enough capacity to run the jobs currently in the queue. **It\ndoesn’t run any jobs itself.**\n\n\nWe use Ubuntu 16.04 LTS for our internal tooling, so just create an EC2\ninstance (*t2.micro* is enough and is included in the free tier.) Setting up\nVPCs and related subnets is out of the scope of this article, we’ll assume\nthat you’re working in the default VPC. Then we need to install a bunch of\nsoftware on our machine to set it up.\n\n\n## Installing gitlab-runner\n\n\ngitlab-runner is the main software we need to complete this task. 
Installing\nit on Ubuntu is really easy.\n\n\n```\n\ncurl -L\nhttps://packages.gitlab.com/install/repositories/runner/gitlab-ci-multi-runner/script.deb.sh\n| sudo bash\n\n```\n\n\n```\n\nsudo apt-get install gitlab-ci-multi-runner\n\n```\n\n\n\u003Cimg src=\"/images/blogimages/auto-scale-ci-runners-gif.gif\" alt=\"Installing\ngitlab-runner\" style=\"width: 700px;\"/>{: .shadow}\n\n\nOnce you’ve done that, register the runner on your GitLab instance. Do this\nas you normally would with any other GitLab CI/CD runner but choose\n**docker+machine** as the runner. Docker Machine is the software required to\nspin up new virtual machines and install Docker on them.\n\n\n## Installing Docker Machine\n\n\nDocker Machine is a handy bit of software that allows one host running\nDocker to spin up and provision other machines running Docker. Installing it\nis even easier:\n\n\n```\n\ncurl -L\nhttps://github.com/docker/machine/releases/download/v0.12.2/docker-machine-`uname\n-s`-`uname -m` >/tmp/docker-machine &&\n\nchmod +x /tmp/docker-machine &&\n\nsudo cp /tmp/docker-machine /usr/local/bin/docker-machine\n\n```\n\n\nThis will install the docker-machine binary in your PATH.\n\n\n## Configuring gitlab-runner\n\n\nBy default, gitlab-runner will not work in the autoscaling mode we want.\nIt’ll just run a job by default and then stop. We want to configure this\nmachine to no longer run tests but to spin up new Docker Machines as and\nwhen necessary. Open your gitlab-runner config file, usually found in\n`/etc/gitlab-runner/config.toml` and make some changes. This is our example\n(with sensitive information removed). 
Let’s go through some of the important\nlines.\n\n\n```\n\nconcurrent = 12\n\ncheck_interval = 0\n\n\n[[runners]]\n  name = \"aws-gitlab-runner-spawner\"\n  limit = 6\n  url = \"https://git.substrakt.com/ci\"\n  token = \"xxxxx\"\n  executor = \"docker+machine\"\n  [runners.docker]\n    tls_verify = false\n    image = \"ruby:2.3.1\"\n    privileged = true\n    disable_cache = false\n    volumes = [\"/cache\"]\n    shm_size = 0\n  [runners.machine]\n    IdleCount = 0\n    MachineDriver = \"amazonec2\"\n    MachineName = \"runner-%s\"\n    MachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\", \"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\", \"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\", \"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\", \"amazonec2-zone=a\", \"amazonec2-root-size=32\", \"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n    IdleTime = 1800\n```\n\n\n```\n\nconcurrent = 12\n\n```\n\n\nThis tells GitLab CI/CD that in total, it should attempt to run 12 jobs\nsimultaneously across all child workers.\n\n\n```\n\nlimit = 6\n\n```\n\n\nThis tells GitLab CI/CD that in total, it should use for running jobs a\nmaximum of six worker nodes. You’ll need to tweak this value depending on\nthe resources your jobs need and the resources of your child nodes. There’s\nno right answer here but generally we found it wasn’t a good idea to have\nmore than the number of CPUs – 1 of jobs running per node but again this is\na bit of a ‘finger-in-the-air’ calculation as it really depends on your tech\nstack.\n\n\n```\n\nIdleCount = 0\n\n```\n\n\nThis tells GitLab CI/CD not to run any machines constantly (whilst idle).\nThis means when nobody is running a job, or no jobs are queued to spin down\nall of the worker nodes after an amount of time (IdleTime at the bottom of\nthe file). We power our nodes down after half an hour of no use. 
This does\nhave the consequence of there being a short wait when we start our day, but\nit saves us money as we’re not using computing power when it’s not required.\n\n\nIf you're interested in more about how `concurrent`, `limit` and `IdleCount`\nare defining the maximum number of jobs and nodes that will be used, you can\nfind a more detailed description in Runner's autoscale configuration\ndocument: [Autoscaling algorithm and\nparameters](https://docs.gitlab.com/runner/configuration/autoscale.html#autoscaling-algorithm-and-parameters),\n[How parameters generate the upper limit of running\nmachines](https://docs.gitlab.com/runner/configuration/autoscale.html#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines).\n\n\n```\n\nMachineOptions = [\"amazonec2-access-key=XXXX\", \"amazonec2-secret-key=XXXX\",\n\"amazonec2-ssh-user=ubuntu\", \"amazonec2-region=eu-west-2\",\n\"amazonec2-instance-type=m4.xlarge\", \"amazonec2-ami=ami-996372fd\",\n\"amazonec2-vpc-id=vpc-xxxxx\", \"amazonec2-subnet-id=subnet-xxxxx\",\n\"amazonec2-zone=a\", \"amazonec2-root-size=32\",\n\"amazonec2-request-spot-instance=true\", \"amazonec2-spot-price=0.03\"]\n\n```\n\n\nThis is where the magic happens. This is where we set our options for Docker\nMachine. It defines the size, type and price of our runners. I’ll run\nthrough each of the non-obvious options.\n\n\n```\n\namazonec2-vpc-id=vpc-xxxxx & amazonec2-subnet-id=subnet-xxxxx\n\n```\n\n\nThis is the VPC and associated subnet ID. Generally, you’d want this in your\ndefault VPC in a public subnet. We run our jobs in a private VPC with\ninternal peering connections to other VPCs due to regulatory constraints.\n\n\n```\n\namazonec2-region=eu-west-2\n\n```\n\n\nThis is the AWS region. We run all of our infrastructure in the EU (London)\nregion.\n\n\n```\n\namazonec2-instance-type=m4.xlarge\n\n```\n\n\nThis is the size of the instance we want for each of our runners. 
This\nsetting can have massive implications on cost and it can be a tricky\nbalancing act. Choose too small and your jobs take forever to run due to a\nlack of resources (more time = more money) but choose too large and you have\nunused compute capacity which costs you money you don’t need to spend.\nAgain, there’s no right answer here, it’s about what works for your\nworkload. We found m4.xlarge works for us.\n\n\n## Save up to 90 percent on EC2 costs using Spot Instances\n\n\nSpot Instances are magic. They allow us to bid for unused capacity in the\nAWS infrastructure and often can mean that EC2 costs can be dramatically\nlower. We’re currently seeing discounts of around 85 percent on our EC2\nbills due to using Spot Instances. Setting them up for use on GitLab CI/CD\nis really easy too. There is (of course) a downside. If our bid price for\nVMs is exceeded, then our instances shut down with only a few minutes\nnotice. But as long as our bid is high enough, this isn’t an issue. Pricing\nin the spot market is insanely complex but in eu-west-2 at least, prices for\nm4.large and xlarge instances appear to have been static for months so a bid\n10-20 percent higher than the current spot price appears to be a safe bet.\nJust keep your eyes peeled. The current spot price for an m4.xlarge instance\nis $0.026. We’ve set our maximum price at $0.03 to give us some wiggle room.\nAt time of writing, the on-demand price is $0.232. The numbers speak for\nthemselves.\n\n\n>Note: Spot pricing can vary significantly between instance sizes, regions\nand even availability zones in the same region. 
This guide assumes that spot\npricing won’t vary massively or that you’ve set a good buffer above the\ncurrent spot price to avoid outages.\n\n\n```\n\namazonec2-request-spot-instance=true & amazonec2-spot-price=0.03\n\n```\n\n\nThis tells GitLab CI/CD that instead of just spawning new EC2 instances at\nfull price, that it should request Spot Instances at the current spot price,\nsetting a maximum bid that it should not exceed per hour, in USD (regardless\nof what currency you’re billed in. We’re billed in GBP, but Spot Instances\nare still calculated in USD.) The maximum bid is whatever you’re comfortable\npaying. We tend to set it close to the on-demand price because we’re looking\nfor any discount. As long as we’re not paying more than we otherwise would,\nit’s fine with us. Your financial constraints may affect your decisions\ndifferently.\n\n\n>Update: From October, AWS will charge in seconds, rather than hours used,\nmaking the potential savings even higher for unused partial hours.\n\n\nWe’d love to see how you get along with this so please let us know. You can\ncontact me max [at] substrakthealth [dot] com. For us, it’s saved us time\nand money and that’s never a bad thing.\n\n\n## About the Guest Author\n\n\nMax Woolf is a Senior Developer at Substrakt Health. 
Based in the UK, they\nuse innovative technology to transform how primary care providers organize\nand deliver care to patients in a sustainable NHS.\n\n\n_[Autoscale GitLab CI runners and save 90% on EC2\ncosts](https://substrakthealth.com/autoscale-gitlab-ci-runners-and-save-90-on-ec2-costs/)\nwas originally published on Substrakt Health's blog._\n\n\nCover image by [Sebastien Gabriel](https://unsplash.com/@sgabriel) on\nUnsplash\n\n{: .note}\n",[763,9],{"slug":1169,"featured":6,"template":700},"autoscale-ci-runners","content:en-us:blog:autoscale-ci-runners.yml","Autoscale Ci Runners","en-us/blog/autoscale-ci-runners.yml","en-us/blog/autoscale-ci-runners",{"_path":1175,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1176,"content":1182,"config":1189,"_id":1191,"_type":14,"title":1192,"_source":16,"_file":1193,"_stem":1194,"_extension":19},"/en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"title":1177,"description":1178,"ogTitle":1177,"ogDescription":1178,"noIndex":6,"ogImage":1179,"ogUrl":1180,"ogSiteName":685,"ogType":686,"canonicalUrls":1180,"schema":1181},"Building containers with GitLab Runner & AWS Fargate executor","Build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667132/Blog/Hero%20Images/build-container-image-runner-fargate-codebuild-cover.jpg","https://about.gitlab.com/blog/aws-fargate-codebuild-build-containers-gitlab-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-07-31\",\n      }",{"title":1183,"description":1178,"authors":1184,"heroImage":1179,"date":1186,"body":1187,"category":718,"tags":1188},"How to build containers with 
the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild",[1185],"Elliot Rushton","2020-07-31","AWS Fargate does not allow containers to run in privileged mode. This means\nDocker-in-Docker (DinD), which enables the building and running of container\nimages inside of containers, does not work with the [AWS Fargate Custom\nExecutor driver for GitLab\nRunner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate).\nThe good news is that users don't have to be blocked by this and may use a\ncloud-native approach to build containers, effectively leveraging a seamless\nintegration with AWS CodeBuild in the [CI/CD pipeline](/topics/ci-cd/).\n\n\nWe provide in-depth instructions on how to autoscale GitLab CI on AWS\nFargate in [GitLab Runner's\ndocumentation](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html).\nIn this blog post, we explain how to instrument CI containers and source\nrepositories to trigger AWS CodeBuild and use it to build container images.\n\n\n## Architecture overview\n\n\n![AWS Fargate + CodeBuild: a cloud-native approach to build containers with\nGitLab\nRunner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild.png)\n\nHow distinct CI workloads run on Fargate.\n\n{: .note.text-center}\n\n\nThe picture above illustrates distinct GitLab CI workloads running on\nFargate. The container identified by `ci-coordinator (001)` is running a\ntypical CI job which does not build containers, so it does not require\nadditional configuration or dependencies. 
The second container,\n`ci-coordinator (002)`, illustrates the problem to be tackled in this post:\nThe CI container includes the AWS CLI in order to send content to an Amazon\nS3 Bucket, trigger the AWS CodeBuild job, and fetch logs.\n\n\n## Prerequisites\n\n\nOnce these prerequisites are configured, you can dive into the six-step\nprocess to configure CI containers and source repositories to trigger AWS\nCodeBuild and use it to build container images.\n\n\n- The [AWS Fargate Custom Executor driver for GitLab\nRunner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate)\nmust be set-up appropriately.\n\n- Ensure the AWS IAM user permissions include the ability to create and\nconfigure S3 and CodeBuild resources.\n\n- AWS IAM user or service role with permissions to upload files to S3, start\nCodeBuild jobs, and read CloudWatch Logs.\n\n- AWS IAM user with permissions to create and configure IAM Policies and\nUsers.\n\n\n## Step 1: Create an AWS S3 bucket\n\n\n1. In the top menu of [AWS Management\nConsole](https://aws.amazon.com/console/) click Services.\n\n1. In the Storage section, select `S3`.\n\n1. Click `Create bucket`.\n\n1. Choose a descriptive name (`ci-container-build-bucket` will be used as\nexample) and select your preferred region.\n\n1. Leave all other fields with default values and click `Create bucket`.\n\n1. In the Buckets list, click the name of the bucket you created.\n\n1. Click `Create folder`.\n\n1. Give it the `gitlab-runner-builds` name.\n\n1. Click `Save`.\n\n\n## Step 2: Create an AWS CodeBuild Project\n\n\n1. Using the AWS Console, click `Services` in the top menu\n\n1. Select `CodeBuild` in the Developer Tools section\n\n1. Click `Create build project`\n\n1. In `Project Name` enter `ci-container-build-project`\n\n1. In `Source provider` select `Amazon S3`\n\n1. In `Bucket` select the `ci-container-build-bucket` created in step one\n\n1. In S3 object key or S3 folder enter `gitlab-runner-builds/build.zip`\n\n1. 
In `Environment image`, select `Managed image`\n\n1. For `Operating system` select your preferred OS from the available\noptions\n\n1. For `Runtime(s)`, choose `Standard`.\n\n1. For `Image`, select `aws/codebuild/standard:4.0`\n\n1. For `Image version`, select `Always use the latest image for this runtime\nversion`\n\n1. For `Environment type` select `Linux`\n\n1. Check the `Privileged` flag\n\n1. For the `Service role` select `New service role` and note the sugggested\n`Role name`\n\n1. For `Build specifications` select `Use a buildspec file`\n\n1. Scroll down to the bottom of the page and click \"Create build project\"\n\n\n## Step 3: Build the CI container image\n\n\nAs stated in Autoscaling GitLab CI on AWS Fargate, a [custom container is\nrequired](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html#step-1-prepare-a-base-container-image-for-the-aws-fargate-task)\nto run GitLab CI jobs on Fargate. Since the solution relies on communicating\nwith S3 and CodeBuild, you'll need to [have the AWS CLI\ntool](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)\navailable in the CI container.\n\n\nInstall the `zip` tool to make S3 communication smoother. As an example of a\nUbuntu-based container, the lines below must be added to the CI container's\n`Dockerfile`:\n\n\n```dockerfile\n\nRUN apt-get update -qq -y \\\n    && apt-get install -qq -y curl unzip zip \\\n    && curl -Lo awscliv2.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip \\\n    && unzip awscliv2.zip \\\n    && ./aws/install\n```\n\n\n## Step 4: Add CodeBuild configuration to the repository\n\n\nBy default, CodeBuild looks for a file named `buildspec.yml` in the build\nsource. This file will instruct CodeBuild on how to build and publish the\nresulting container image. 
Create this file with the content below and\ncommit it to the git repository (_if you changed the **Buildspec name** when\nconfiguring the CodeBuild project [in Step 2](#buildspec), please create the\nfile accordingly_):\n\n\n```yaml\n\nversion: 0.2\n\n\nphases:\n  install:\n    commands:\n      - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2&\n      - timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"\n  build:\n    commands:\n      - echo Build started on `date`\n      - docker -v\n      - docker build -t \u003CIMAGE-TAG> .\n      - echo Build completed on `date`\n```\n\n\n## Step 5: Set up the GitLab CI job\n\n\nNow we will set up the GitLab CI job that will pull everything together.\n\n\n### Interacting with CodeBuild through the AWS CLI\n\n\nThe CI job will need to interact with AWS Cloud to start CodeBuild jobs,\npoll the status of the jobs, and fetch logs. Commands such as `aws\ncodebuild` and `aws logs` help to tackle this, so let's use them in a\nscript, `codebuild.sh`:\n\n\n```bash\n\n#!/bin/bash\n\n\nbuild_project=ci-container-build-project\n\nbuild_id=$(aws codebuild start-build --project-name $build_project --query\n'build.id' --output text)\n\nbuild_status=$(aws codebuild batch-get-builds --ids $build_id --query\n'builds[].buildStatus' --output text)\n\n\nwhile [ $build_status == \"IN_PROGRESS\" ]\n\ndo\n    sleep 10\n    build_status=$(aws codebuild batch-get-builds --ids $build_id --query 'builds[].buildStatus' --output text)\ndone\n\n\nstream_name=$(aws codebuild batch-get-builds --ids $build_id --query\n'builds[].logs.streamName' --output text)\n\ngroup_name=$(aws codebuild batch-get-builds --ids $build_id --query\n'builds[].logs.groupName' --output text)\n\n\naws logs get-log-events --log-stream-name $stream_name --log-group-name\n$group_name --query 'events[].message' --output text\n\necho Codebuild completed with status $build_status\n\n```\n\n\n### Add a job 
to build the resulting container\n\n\nOnce the steps one through five are complete, the source repository will be\nstructured as follows:\n\n\n```plaintext\n\n/sample-repository\n  ├── .gitlab-ci.yml\n  ├── buildspec.yml\n  ├── codebuild.sh\n  ├── Dockerfile\n  ├── \u003CAPPLICATION-FILES>\n```\n\n\nThe final step to build the container is to add a job to `.gitlab-ci.yml`:\n\n\n```yaml\n\ndockerbuild:\n  stage: deploy\n  script:\n    - zip build.zip buildspec.yml Dockerfile \u003CAPPLICATION-FILES>\n    - aws configure set default.region \u003CREGION>\n    - aws s3 cp build.zip s3://ci-container-build-bucket/gitlab-runner-builds/build.zip\n    - bash codebuild.sh\n```\n\n\nBelow are some definitions from terms in the script:\n\n\n- `\u003CAPPLICATION-FILES>` is a placeholder for the files that will be required\nto successfully build the resulting container image using the `Dockerfile`,\ne.g., `package.json` and `app.js` in a Node.js application\n\n- `Dockerfile` is used to build the resulting image. _Note: It is not the\nsame file used to build the CI container image, mentioned in [Step 3: Build\nthe CI container image](#step-3-build-the-ci-container-image)_\n\n- Zip and AWS CLI must be installed in the CI container to make the script\nwork – refer to [Step 3: Build the CI container\nimage](#step-3-build-the-ci-container-image) for details\n\n\n## Step 6: Set up AWS credentials\n\n\nThe final step is to set up the AWS credentials. As we already mentioned,\nthe CI job will interact with AWS through the AWS CLI to perform a number of\noperations, and to do that, the AWS CLI needs to authenticate as an IAM user\nwith the permissions listed below. 
We recommend you create a new user and\ngrant it minimal privileges instead of using your personal AWS user account.\nFor the sake of simplicity, we suggest this approach to complete this\nwalk-through guide.\n\n\nThis AWS user only needs programmatic access and do not forget to make note\nof its Access key ID and Secret access key – they will be needed later. A\nsimple way to grant only the minimal privileges for the new user is to\ncreate a customer managed policy since it can be directly attached to the\nuser. A group might also be used to grant the same privileges for more\nusers, but it is not mandatory for running the sample workflow.\n\n\n- S3\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"s3:PutObject\",\n    \"Resource\": \"arn:aws:s3:::ci-container-build-bucket/gitlab-runner-builds/*\"\n  }\n  ```\n\n- CodeBuild\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": [\"codebuild:StartBuild\", \"codebuild:BatchGetBuilds\"],\n    \"Resource\": \"arn:aws:codebuild:\u003CREGION>:\u003CACCOUNT-ID>:project/ci-container-build-project\"\n  }\n  ```\n\n- CloudWatch Logs\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"logs:GetLogEvents\",\n    \"Resource\": \"arn:aws:logs:\u003CREGION>:\u003CACCOUNT-ID>:log-group:/aws/codebuild/ci-container-build-project:log-stream:*\"\n  }\n  ```\n\nThe access credentials can be provided to AWS CLI through GitLab CI\nenvironment variables. Please go to your GitLab Project's **CI/CD\nSettings**, click **Expand** in the **Variables** section, add\n`AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` with the values you got from\nthe AWS Management Console after creating the IAM user. 
See the image below\nfor the result you can expect:\n\n\n![Providing AWS credentials for GitLab\nRunner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild-credentials.png)\n\n\nUsing an IAM Role and [Amazon ECS temporary/unique security\ncredentials](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)\nis also possible, but not covered in this tutorial.\n\n{: .note.text-center}\n\n\n## Step 7: It's showtime\n\n\nWith all configurations in place, commit the changes and trigger a new\npipeline to watch the magic happen!\n\n\n### Just need the highlights?\n\n\n1. The CI job script added in [Step\n5](#add-a-job-to-build-the-resulting-container) compresses the resulting\ncontainer image build files into `build.zip`\n\n1. `build.zip` is then uploaded to the S3 Bucket we created in [Step 1:\nCreate an Amazon S3 Bucket](#step-1-create-an-amazon-s3-bucket)\n\n1. Next, `codebuild.sh` starts a CodeBuild job based on the project created\nin [Step 2: Create an AWS CodeBuild\nProject](#step-2-create-an-aws-codebuild-project) (Note: that project has an\nS3 object as its source provider)\n\n1. 
Finally, the CodeBuild job downloads `gitlab-runner-builds/build.zip`\nfrom S3, decompresses it and – from `buildspec.yml`– builds the resulting\ncontainer image\n\n\nA sample repository, demonstrating everything described in the article is\navailable\n[here](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/codebuild-on-fargate-example/).\n\n\n## Cleanup\n\n\nIf you want to perform a cleanup after testing the custom executor with AWS\nFargate and CodeBuild, you should remove the following objects:\n\n\n- AWS S3 bucket created in [Step 1](#step-1-create-an-amazon-s3-bucket)\n\n- AWS CodeBuild project created in [Step\n2](#step-2-create-an-aws-codebuild-project)\n\n- `RUN` command added to the CI container image in [Step\n3](#step-3-build-the-ci-container-image)\n\n- The `buildspec.yml` file created in [Step\n4](#step-4-add-codebuild-configuration-to-the-repository)\n\n- The `codebuild.sh` file created in [Step\n5](#step-5-set-up-the-gitlab-ci-job)\n\n- The `dockerbuild` job added to `.gitlab-ci.yml` in [Step\n5](#step-5-set-up-the-gitlab-ci-job)\n\n- IAM policy, user (and maybe group) created in [Step\n6](#step-6-set-up-aws-credentials)\n\n- GitLab CI/CD variables in [Step 6](#step-6-set-up-aws-credentials)\n\n\nRead more about GitLab and AWS:\n\n-[How autoscaling GitLab CI works on AWS\nFargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n-[GitLab 12.10 released with Requirements Management and Autoscaling CI on\nAWS Fargate](/releases/2020/04/22/gitlab-12-10-released/)\n\n-[Announcing 32/64-bit Arm Runner Support for AWS\nGraviton2](/blog/gitlab-arm-aws-graviton2-solution/)\n\n\nCover image by [Lucas van Oort](https://unsplash.com/@switch_dtp_fotografie)\non [Unsplash](https://unsplash.com)\n\n{: .note}\n",[9,232,917],{"slug":1190,"featured":6,"template":700},"aws-fargate-codebuild-build-containers-gitlab-runner","content:en-us:blog:aws-fargate-codebuild-build-containers-gitlab-runner.yml","Aws Fargate Codebuild Build Containers 
Gitlab Runner","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner.yml","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"_path":1196,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1197,"content":1203,"config":1209,"_id":1211,"_type":14,"title":1212,"_source":16,"_file":1213,"_stem":1214,"_extension":19},"/en-us/blog/aws-gitlab-serverless-webcast",{"title":1198,"description":1199,"ogTitle":1198,"ogDescription":1199,"noIndex":6,"ogImage":1200,"ogUrl":1201,"ogSiteName":685,"ogType":686,"canonicalUrls":1201,"schema":1202},"How to deploy AWS Lambda applications with ease","Highlights from our serverless webcast with AWS exploring the Serverless Application Model.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/aws-gitlab-serverless-webcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy AWS Lambda applications with ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":1198,"description":1199,"authors":1204,"heroImage":1200,"date":1205,"body":1206,"category":1040,"tags":1207},[715],"2020-04-29","\n\nIn the [Cloud Native Computing Foundation (CNCF) 2019 survey](https://www.cncf.io/blog/2019-cncf-survey-results-are-here-deployments-are-growing-in-size-and-speed-as-cloud-native-adoption-becomes-mainstream/), 41% of respondents use serverless technology. Among those using serverless, 80% use a hosted platform vs. 20% who use installable software. Of the 80% using a hosted platform, the top tool is AWS Lambda (53%).\n\nAs organizations continue to explore the power and scalability of serverless computing, AWS Lambda remains a large part of the conversation. 
On April 9, AWS and GitLab hosted a serverless webcast to demonstrate how teams can use [GitLab CI/CD](/topics/ci-cd/) and the AWS Serverless Application Model (SAM) to build, test, and deploy Lambda applications. For the serverless webcast, we showed attendees how to:\n\n*   Use and install the AWS SAM CLI\n*   Create a SAM application including a Lambda function and API\n*   Build, test, and deploy the application using GitLab CI/CD\n\nWhether you’re an AWS customer, a serverless newbie, or wanting to explore new ways to utilize GitLab CI/CD, this webcast had something for everyone. We’ve compiled some highlights from the discussion and a link to the on-demand webcast.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch the webcast with AWS and GitLab to learn all about serverless - [Tune in here](/webcast/aws-gitlab-serverless/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n## What is the Serverless Application Model (SAM)?\n\nTooling and workflows are the biggest roadblocks to adopting serverless. Organizations love the scalability and automation of serverless but don’t believe that they have the tools to implement it effectively. In this webcast, we showed how teams can seamlessly use SAM with GitLab CI/CD for their serverless application development.\n\n[AWS SAM](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html) is an open source framework for building serverless applications on AWS. 
It can be considered an extension to CloudFormation that makes it easier to define and deploy AWS resources – such as Lambda functions, API Gateway APIs and DynamoDB tables – commonly used in serverless applications.\n\nIn addition to its templating capabilities, SAM also includes a CLI for testing and deployment allows teams to define the resources they need as code. So that includes the serverless functions, but can also include any of the rest of the AWS suite of tools. SAM works by taking all of those things and creates a cloud formation stack from a SAM template. Next it automatically deploys those various functions and other AWS components and gets the IAM configured correctly between all of them so that an application can run not only Lambda functions, but also leverage the rest of the AWS stack to create an entire system and application.\n\n\n## Why is SAM a great tool for enterprise teams?\n\nSenior developer evangelist at GitLab, [Brendan O’Leary](/company/team/#brendan), is a Node.js developer at heart. \"For better or worse,\" he laughs. When putting together the presentation, he noted that SAM offered templates not only in Python but in Node.js as well. \"I think applies directly to the enterprise because those teams are going to be diverse. They're going to have different needs, they're going to choose the language that best fits those needs. It was great to have this starter template that I could start with Node.js 12X to really get started on coding in a comfortable environment for me.\"\n\nThe SAM templates can also be an asset for enterprise teams because they streamline a lot of the backend work. In the project presented during the webcast, we were able to start from a SAM template to orchestrate the IAM permissions we needed instead of coding all of the cloud formation ourselves. 
For a large or distributed team, this makes SAM a great out-of-the-box tool for serverless applications.\n\n\n## The benefits of going serverless\n\nRam Dileepan, solutions architect at AWS, highlighted this quote from AWS CTO Werner Vogels: No server is easier to manage than no server at all. \"The main goal of modern application development is to automate and abstract as much as possible from the customer. So what we do as an AWS cloud, we abstract a lot of the details from developers so they can actually focus on building applications instead of working with infrastructure.\"\n\nFor teams looking to incorporate serverless, it can provide a number of benefits:\n\n*   Scalable\n*   Pay for what you use\n*   Availability\n\n\n## Serverless and microservices best practices\n\nWhile serverless means that, from the developer perspective, servers are not actively managed, there is still work to do. When you design the application, you have to design how are you going to monitor it and what you are going to monitor. \"Even when you go to serverless, you can actually just follow the standard development best practices,\" says Ram. Here he presented his three serverless/microservices best practices:\n\n*   Treat your infrastructure the way you treat your code\n*   Set up an automated integration and deployment pipeline\n*   Build with monitoring and observability from day one\n\nIn addition to going over the SAM CLI and creating a GitLab CI/CD pipeline, Brendan O’Leary and Ram Dileepan also fielded a variety of questions in the live Q&A. 
To watch the full webcast and learn more about serverless with GitLab and AWS, click the link below or in the header.\n\n[Watch our serverless webcast Ram Dileepan of AWS and Brendan O'Leary of GitLab 🍿](/webcast/aws-gitlab-serverless/)\n{: .alert .alert-gitlab-purple}\n",[1208,830,9],"webcast",{"slug":1210,"featured":6,"template":700},"aws-gitlab-serverless-webcast","content:en-us:blog:aws-gitlab-serverless-webcast.yml","Aws Gitlab Serverless Webcast","en-us/blog/aws-gitlab-serverless-webcast.yml","en-us/blog/aws-gitlab-serverless-webcast",{"_path":1216,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1217,"content":1223,"config":1229,"_id":1231,"_type":14,"title":1232,"_source":16,"_file":1233,"_stem":1234,"_extension":19},"/en-us/blog/beginner-guide-ci-cd",{"title":1218,"description":1219,"ogTitle":1218,"ogDescription":1219,"noIndex":6,"ogImage":1220,"ogUrl":1221,"ogSiteName":685,"ogType":686,"canonicalUrls":1221,"schema":1222},"GitLab’s guide to CI/CD for beginners","CI/CD is a key part of the DevOps journey. Here’s everything you need to understand about this game-changing process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681391/Blog/Hero%20Images/beginnercicd.jpg","https://about.gitlab.com/blog/beginner-guide-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s guide to CI/CD for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-06\",\n      }",{"title":1218,"description":1219,"authors":1224,"heroImage":1220,"date":1225,"body":1226,"category":718,"tags":1227},[1037],"2020-07-06","\n\nContinuous integration and [continuous delivery/deployment](/topics/continuous-delivery/) (most often referred to as CI/CD) are the cornerstones of [DevOps](/topics/devops/) and any modern software development practice. 
Here’s everything you need to know about [CI/CD for beginners](/blog/how-to-keep-up-with-ci-cd-best-practices/).\n\n## What CI/CD means\n\nIf your software development process involves a lot of stopping, starting and handoffs, [CI/CD](/topics/ci-cd/) may be just what you’re looking for. A CI/CD pipeline is a seamless way for developers to make changes to code that are then automatically tested and pushed out for delivery and deployment. The goal is to eliminate downtime. Get CI/CD right and you’re well on the road to successful DevOps and dramatically faster code release. In our [2020 Global DevSecOps Survey](/blog/devsecops-survey-released/), nearly 83% of survey takers said they’re getting code out the door more quickly thanks to DevOps.\n\n## Understand CI/CD basics\n\nIf you’re not sure what a pipeline is, or how the entire process works, here’s a [detailed explanation](/blog/a-beginners-guide-to-continuous-integration/) of how all the moving parts work together to make software development quicker and easier.\n\n## Four benefits of CI/CD\n\nYes, CI/CD helps speed up delivery of code but it also makes for happier software developers. At a time when there continues to be [a worldwide shortage of software developers](https://www.gartner.com/en/newsroom/press-releases/2019-01-17-gartner-survey-shows-global-talent-shortage-is-now-the-top-emerging-risk-facing-organizations), it’s critical to retain technical talent. Developer job satisfaction is just one of [four key benefits](/blog/positive-outcomes-ci-cd/) that come from implementing a CI/CD process.\n\n## How to pick the right CI/CD tool\n\nNow that you’re sold on the [benefits of CI/CD](/topics/ci-cd/benefits-continuous-integration/) it’s time to choose a tool. 
There are a number of considerations, from [budget to room for growth](/topics/ci-cd/choose-continuous-integration-tool/) so it’s worth taking the time to think it through.\n\n## How to make the business case for CI/CD\n\nTo tie a CI/CD process to ROI isn’t difficult, but it’s an important step to take to get management buy-in. Here are [three factors to consider](/blog/modernize-your-ci-cd/) – including the hidden cost of toolchain sprawl – as you make the case for CI/CD.\n\n## Take 20 minutes and build a CI/CD pipeline\n\nOk, enough talking about theoreticals... it’s time to do something. Using GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality, you can [move from code to production](/blog/building-a-cicd-pipeline-in-20-mins/) in just two simple steps and in only 20 minutes (no, really, just 20 minutes).\n\n## Next stop: Kubernetes!\n\nFinally, you can tie your GitLab CI pipeline into Google Kubernetes Engine (GKE) and as a bonus it takes only 15 minutes. 
Our [step-by-step tutorial](/blog/gitlab-ci-on-google-kubernetes-engine/) is completely beginner-friendly.\n\n**Level up your CI/CD knowledge:**\n\n[How CI can put the \"Sec\" in DevSecOps](/blog/solve-devsecops-challenges-with-gitlab-ci-cd/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Get started with parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Kyle Glenn](https://unsplash.com/@kylejglenn) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,721,1228],"kubernetes",{"slug":1230,"featured":6,"template":700},"beginner-guide-ci-cd","content:en-us:blog:beginner-guide-ci-cd.yml","Beginner Guide Ci Cd","en-us/blog/beginner-guide-ci-cd.yml","en-us/blog/beginner-guide-ci-cd",{"_path":1236,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1237,"content":1243,"config":1249,"_id":1251,"_type":14,"title":1252,"_source":16,"_file":1253,"_stem":1254,"_extension":19},"/en-us/blog/best-practices-for-kubernetes-runners",{"title":1238,"description":1239,"ogTitle":1238,"ogDescription":1239,"noIndex":6,"ogImage":1240,"ogUrl":1241,"ogSiteName":685,"ogType":686,"canonicalUrls":1241,"schema":1242},"Best practices to keep your Kubernetes runners moving","In a presentation at GitLab Commit San Francisco, a senior software engineer from F5 Networks shares some best practices for working with Kubernetes runners.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681341/Blog/Hero%20Images/trackandfield.jpg","https://about.gitlab.com/blog/best-practices-for-kubernetes-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Best practices to keep your Kubernetes runners moving\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-05-27\",\n      
}",{"title":1238,"description":1239,"authors":1244,"heroImage":1240,"date":1246,"body":1247,"category":718,"tags":1248},[1245],"Sara Kassabian","2020-05-27","Sometimes in software engineering, you have to learn the hard way. GitLab CI\nis extremely powerful and flexible, but it’s also easy to make mistakes that\ncould take out a GitLab runner, which can clog up Sidekiq and bring down\nyour entire GitLab instance.\n\n\nLuckily, Sean Smith, senior software engineer for F5 Networks has been\nthrough it, and summarizes some of their learnings in [his talk at GitLab\nCommit San Francisco](https://www.youtube.com/watch?v=Hks5ElUxkP4). In the\npresentation, Sean goes in-depth about a past incident that clogged up F5\nNetwork's GitLab runner, and shares tips on setting limits for Kubernetes\n(K8s) runners.\n\n\nSean is a GitLab administrator for [F5 Networks](https://www.f5.com/), a\ncompany with about 1,800 users worldwide running 7,500 projects each month –\nexcluding forks. That’s roughly 350,000 - 400,000 CI jobs going through the\nK8s runners each month. Until some recent hires, there were only three\nengineers to handle it all.\n\n\nInstead of running a giant GitLab instance on one VM, F5 broke up their\ninstance into seven different servers: Two HA web servers, one PostGres\nserver, PostGres replica, Sidekiq, Gitaly (our Git filesystem), and Redis.\n\n\n## Keep your GitLab runners up and moving\n\n\nF5 uses two types of GitLab runners:\n\n\n*   Kubernetes: About 90% of F5 jobs go through K8s\n\n*   Docker: Docker machine is run on-prem and in the cloud\n\n\n**Why use Docker?** F5 uses Docker to configure cluster networks in\ndifferent jobs as well as for unit testing. 
Since the Docker machine can run\non-prem and also in the cloud, it’s easy to have a VM dedicated to the job\nthat allows you to manage those Docker images and Docker containers and set\nup your cluster networking topology within Docker, so you can run your tests\nand tear it down afterward without affecting other users. This isn’t\nsomething that is really possible in Kubernetes runners.\n\n\nOtherwise, F5 Networks uses Kubernetes, but keeping your K8s up and running\nisn’t necessarily foolproof.\n\n\n### CI jobs can spawn\n\n\nSometimes, a seemingly benign coding error can create unanticipated\nconsequences for your Kubernetes runners.\n\n\nOne time, an F5 Engineer decided to use a GitLab CI job to automatically\nconfigure different settings on various jobs and projects. It made sense to\nconfigure using GitLab CI because the engineer wanted to be able to use [Git\nfor version control](/topics/version-control/). Version control makes it\neasier for the team to iterate on the code transparently. He wrote the code\nto run the job.\n\n\nBut, he didn’t read the fine print in the library he was using. The code he\nwrote looked for the project ID, and if it found the project ID, runs the\npipeline once per hour at the 30-minute mark. The assumption was that if\nthere was already a matching scheduled task, the create function would not\ncreate a duplicate. Unfortunately, this was not the case. 
The code he ran\ncaused the number of CI jobs to grow exponentially.\n\n\n![The code that clogged the K8s runner with GitLab CI jobs for F5\nNetworks](https://about.gitlab.com/images/blogimages/problemcode.png){:\n.shadow}\n\nThe code that clogged the K8s runner with GitLab CI jobs for F5 Networks.\nCan you see the problem yet?\n\n{: .note.text-center}\n\n\n\"You schedule a job, then next you schedule another job so now you've got\ntwo jobs scheduled, and then you've got four jobs scheduled, and then eight,\nafter 10 iterations, you get around the 1,024 jobs scheduled and after\n1,532,000 jobs, if this was allowed to run for 24 hours, you would end up\nwith 16.7 million jobs being scheduled by the 24th hour,\" says Sean.\n\n\nIn short: Chaos. Remember, F5 Networks has a CI pipeline capacity of 350,000\nto 400,000 jobs per month, so 16.7 million jobs in 24 hours could easily\nclog the system, taking down the K8s nodes, as well as GitLab nodes.\n\n\nLuckily, there’s a simple enough fix. First, identify which project is\ncausing the problem, and disable CI on the project so it can’t create any\nnew jobs. Next, kill all the pending jobs by [running this\nsnippet](https://gitlab.com/snippets/1924269).\n\n\n```\n\n# gitlab-rails console\n\np = Project.find_by_full_path(‘rogue-group/rogue-project’)\n\nCi::Pipeline.where(project_id: p.id).where(status: ‘pending’).each {|p|\np.cancel}\n\nexit\n\n```\n\n\nIt’s really a judgment call whether to kill a running job or not. If a job\nis currently running and is going to take all of 30 seconds then maybe don’t\nbother killing it, but if the job is going to take 30 minutes then consider\nkilling it to free up resources for your users.\n\n\nF5 learned a lesson here and set up a monitoring alert to help ensure the\njob queue doesn’t back up like that again. The Cron job checks to make sure\nF5 is not exceeding a preestablished threshold on the number of jobs in a\npending state. 
The alert links to a dashboard and also includes the full\nplaybook for how to resolve the problem (because let’s face it, nobody is at\ntheir best when troubleshooting bleary-eyed at 3 a.m.). At first there were\nsome false positives, but now the alerting has been fine-tuned and the\nsystem saved F5 from two outages so far.\n\n\n### Push it to the limit\n\n\nThe fact is, nobody has an unlimited cloud budget, and even if you're\non-prem, resources are even more constrained for users that rely upon\nhardware. Sean says that F5 soon realized that, to meet the needs of all\nusers, sensible limits had to be established so one or two mega-users didn't\ndevour all their resources. He has some tips on how to set limits in your\nKubernetes and GitLab runners.\n\n\nWhile some users may be disgruntled that cloud limits exist and are\nenforced, the best method is to keep an open dialogue with users about the\nlimits while recognizing that projects expand and grow over a period of\ntime.\n\n\nFortunately you can set the limits yourself and don’t have to rely on the\ngoodwill of your users to conserve CPU. Kubernetes allows limits by default,\nand GitLab supports K8s request and limits. The K8s scheduler uses requests\nto determine which nodes to run the workload on. 
Limits will kill a job if\nthe job exceeds the predefined limit – there can be different requests and\nlimits but if requests aren’t specified and limits are, the scheduler will\nuse the limits to determine the request value.\n\n\n[Take a peek at what F5 configured the limits for their Kubernetes GitLab\nrunner](https://gitlab.com/snippets/1926912).\n\n\n```ruby\n\nconcurrent = 200\n\nlog_format = \"json\"\n\n[[runners]]\n  name = \"Kubernetes Gitlab Runner\"\n  url = \"https://gitlab.example.com/ci\"\n  token = \"insert token here\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    namespace = \"gitlab-runner\"\n    service-account = \"gitlab-runner-user\"\n    pull_policy = \"always\"\n\n    # build container\n    cpu_limit = \"2\"\n    memory_limit = \"6Gi\"\n\n    # service containers\n    service_cpu_limit = \"1\"\n    service_memory_limit = \"1Gi\"\n\n    # helper container\n    helper_cpu_limit = \"1\"\n    helper_memory_limit = \"1Gi\"\n```\n\n\n\"We have got currency of 200 jobs, so it will at max spawn 200 jobs and\nyou'll see that we are limiting the CPU use on the build container to two\nand memory to six gigabytes, and on the helper and service CPU and memory\nlimits, we have one CPU and one gig of memory each,\" says Sean. \"And so it\ngives you that flexibility to break it out because generally, you don't\nnecessarily need as much CPU or as much memory on a service that you're\nspending up in your CI job.\"\n\n\n## What comes first: Setting up Kubernetes runners or establishing limits?\n\n\n[DevOps](/topics/devops/) is a data-driven practice, so the idea of setting\nlimits to conserve resources without any underlying data about what users\nare doing can seem counterintuitive. 
If you’re migrating to Kubernetes\nrunners from a Docker runner or a shell runner, it’s easy enough to\nextrapolate the numbers to establish limits as you set up your Kuberntes\nrunners.\n\n\nIf you’re brand-new to GitLab and GitLab CI, then it’s kind of a shot in the\ndark. Think about your bills and resource constraints: How much memory and\nCPU is available? Is anything else running on your K8s cluster. Chances are,\nyour guesses will be incorrect – but that’s OK.\n\n\nIt might sound obvious, but if you’re running a hosted application on the\nsame K8s cluster as your GitLab CI jobs, don’t set limits based on the\ncapacity of a full K8s cluster. Ideally, you’d have a separate K8s cluster\nfor GitLab CI jobs, but that isn’t always possible.\n\n\n### How F5 Networks did it\n\n\nF5 Networks started with a small team of roughly 50 people and maybe 100\nprojects in GitLab – so setting a limit on K8s wasn’t a major concern until\nthe company and, as a result, projects, started to grow.\n\n\nOnce it came time to set limits to their preexisting K8s runners, the first\nstep was to enable the K8s metric server to monitor how their users consume\nresources. The next step was to determine what users are doing. Sean\nrecommends using a tool like Grafana or Prometheus, which has a native\nintegration within GitLab (although, F5 used a tool called K9), to extract\nthe data from the K8s metric server and display it on some sort of dashboard\nusing Grafana or Prometheus.\n\n\n## Some more tips for Kubernetes runners\n\n\n### Cutting them off: Enforcing limits\n\n\nOnce a user hits their limit, most of the time the end result is their job\ngets killed. Usually the user will notice a mistake, go in, and fix their\ncode, but most likely they will just ask for more resources.\n\n\nThe best way to determine whether or not to allocate more of your finite\nresources to a user is to determine need, Sean explains. 
Ask the user to\nreturn to you with concrete numbers about the amount of RAM or CPU they\nrequire. But if you don’t have the resources, then don’t overextend\nyourselves to the detriment of your other users.\n\n\n### Use labels to reveal more data\n\n\n[Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)\nmake it easier to identify workloads in Kubernetes, and can be expanded to\nenvironmental variables within GitLab, for example, job = \"$CI_JOB_ID\" and\nproject = \"$CI_PROJECT_ID\". Labels can be used by admins who are manually\ndoing Quebectal commands against K8s or they can be used in reporting tools\nlike Prometheus or Grafana for setting limits. But labels are the most\nvaluable when it comes to debugging purposes.\n\n\nBear in mind, labels are finicky in Kubernetes. [There are certain\ncharacters (stay away from \"?\") that can cause jobs to\nfail](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4565). There is a\n63 character limit on labels. If there is an unsupported character or the\nlabel is too long, the job won’t start. There won’t be a really good\nindication as to why your job wouldn’t start either, which can be a pain for\ntroubleshooting. [Bookmark this page to learn more about labels in\nKubernetes](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)\n(including its limitations).\n\n\nGitLab users that run on K8s need to be cautious not to overburden the\nrunner with GitLab CI jobs, and ought to consider setting limits on CPU to\nconserve valuable resources.\n\n\nWant to learn more about how F5 manages their Kubernetes runners on their\nGitLab instance? 
Watch Sean's presentation at GitLab Commit San Francisco in\nthe video below.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Hks5ElUxkP4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Learn more\n\n\n* [Read on](/solutions/kubernetes/) to learn more about how GitLab and\nKubernetes work together, and explore our plans for future integration with\nKubernetes.\n\n\n* Explore the official documentation on [Kubernetes\nexecutor](https://docs.gitlab.com/runner/executors/kubernetes.html), which\ncovers everything from choosing options in your configuration file to giving\nGitLab Runner access to the Kubernetes API, environment variables, volumes,\nhelper containers, security context, privileged mode, secret volume, and\nremoving old runner pods.\n\n\nCover Photo by [Kolleen\nGladden](https://unsplash.com/@rockthechaos?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/track-and-field?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note.text-center}\n",[1228,9,763],{"slug":1250,"featured":6,"template":700},"best-practices-for-kubernetes-runners","content:en-us:blog:best-practices-for-kubernetes-runners.yml","Best Practices For Kubernetes Runners","en-us/blog/best-practices-for-kubernetes-runners.yml","en-us/blog/best-practices-for-kubernetes-runners",{"_path":1256,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1257,"content":1263,"config":1270,"_id":1272,"_type":14,"title":1273,"_source":16,"_file":1274,"_stem":1275,"_extension":19},"/en-us/blog/best-practices-leading-orgs-to-release-software-faster",{"title":1258,"description":1259,"ogTitle":1258,"ogDescription":1259,"noIndex":6,"ogImage":1260,"ogUrl":1261,"ogSiteName":685,"ogType":686,"canonicalUrls":1261,"schema":1262},"4 best practices leading orgs to release software 
faster","GitLab's 2023 Global DevSecOps Survey illuminates the strategies that organizations deploying more frequently have in common.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663908/Blog/Hero%20Images/2023-devsecops-report-blog-banner2.png","https://about.gitlab.com/blog/best-practices-leading-orgs-to-release-software-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 best practices leading orgs to release software faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristina Weis\"}],\n        \"datePublished\": \"2023-06-08\",\n      }",{"title":1258,"description":1259,"authors":1264,"heroImage":1260,"date":1266,"body":1267,"category":1040,"tags":1268},[1265],"Kristina Weis","2023-06-08","\nReleasing software faster is one of the biggest goals of many organizations — and for good reason. It helps them keep up with competitors, land and keep more customers, improve employee satisfaction, and much more. But maintaining that velocity requires investment in processes and technologies that help DevSecOps teams deliver, secure, and deploy software faster without compromising quality.\n\nIn our [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) we asked more than 5,000 development, security, and operations professionals about everything from deployment frequency to the practices teams have adopted – all to learn what the most agile and efficient organizations have in common. One respondent, a director of IT security in the retail sector, summed up the challenge as follows: “Software customers are increasingly vocal and demanding, expecting faster releases and greater customizability. Developers will need to keep up with these demands while still maintaining stability and usability.”\n\nSo what’s helping organizations be more productive and efficient? 
Here are four of the best practices that, according to the survey, help organizations release software faster and deploy more frequently:\n\n## 1. Running applications in the cloud\nOne of the benefits people commonly attribute to deploying to the cloud is increased development speed. As it turns out, this year’s survey shows there’s some serious truth to that. Respondents with at least a quarter of their applications in the cloud were 2.2 times more likely to be releasing software faster than they were a year ago — and respondents with at least half of their applications in the cloud were 4.2 times more likely to deploy to production multiple times per day.\n\nSeveral respondents commented on the value of the cloud while also acknowledging the complexities cloud computing can bring to software development. An IT operations manager in the industrial manufacturing sector shared that “developing software that is designed for the cloud-native environment” is one of the top challenges facing software development this year. Likewise, an IT operations manager in the telecommunications sector said: “With the increase in the use of cloud computing and IoT devices, there is a greater need for secure coding practices to protect sensitive data from cyber attacks.” As organizations move to a cloud-first model for software development, they will need to adopt technologies that allow them to build natively in the cloud while keeping security top of mind throughout the development process.\n\n## 2. BizDevOps\nThough DevOps and DevSecOps mostly steal the show in terms of methodologies, some organizations go a step further and [practice BizDevOps](https://about.gitlab.com/blog/a-snapshot-of-modern-devops-practices-today/) — that is, incorporating business teams alongside development, security, and operations teams. 
An IT operations manager in the software sector emphasized the importance of collaboration with the business, sharing that “as software projects become larger and more complex, developers will need to work closely with other team members, including designers, testers, project managers, and business stakeholders.” This approach appears to be paying off for some: Respondents whose organizations practice BizDevOps were 1.4 times more likely to be releasing software faster than they were a year ago.\n\n## 3. CI/CD\nIt’s not surprising that automating the software development lifecycle with [CI/CD](https://docs.gitlab.com/ee/ci/) would help teams release software faster and more efficiently; however, it’s nice to see confirmation and put some numbers to the difference it can make. The survey shows that respondents [practicing CI/CD](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/) were twice as likely to deploy multiple times per day and 1.2 times more likely to release software faster than they did a year ago.\n\nDespite the value of CI/CD for driving efficiency, respondents also identified challenges. For instance, an IT operations associate in the aerospace/defense sector pointed to “management that doesn't understand CI/CD at all” as a blocker to more efficient software development. Meanwhile, a software development intern in the biotech sector shared that “tools to automate CI/CD, together with code editors, APM software, and defect trackers, can help with a faster and quality development cycle,” but “companies are hesitant to spend on tools that can help increase their developers’ productivity.” These responses underscore the value of investing in tools that unify CI/CD with other DevSecOps practices — such as incorporating security early in the development process and creating tighter feedback loops — to help organizations break down development silos.\n\n## 4. 
DORA and other metrics\nOrganizations that [make a conscious effort to track key development metrics](https://about.gitlab.com/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too/) are more likely to improve them, according to the survey. This makes sense because by virtue of an organization choosing to track a metric, they’re signaling to their teams that it’s important, likely reminding them of whether the metric is improving (or not) periodically, and quite possibly prioritizing initiatives aimed at improving those metrics. We found that respondents whose organizations track their [DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html) and other similar metrics were 1.4 times more likely to deploy multiple times per day.\n\n## A deeper dive on productivity and efficiency\n\nFor a deeper look into release velocity and deployment frequency, and all the practices that made respondents more likely to release software faster and deploy multiple times per day, check out our [2023 DevSecOps Report: Productivity & Efficiency Within Reach](https://about.gitlab.com/developer-survey/).\n\nThe report also digs into two other key factors that can have a big impact on productivity and efficiency: how long it takes to onboard new developers and how difficult or easy it is for organizations to attract, hire, and retain developers. 
We’ll show you where things stand and the practices that made respondents more likely to be successful.\n\n_[Read the highlights from “Security Without Sacrifices,” the first report in our 2023 Global DevSecOps Report series.](/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops/)_\n",[1269,9,830,696],"developer survey",{"slug":1271,"featured":6,"template":700},"best-practices-leading-orgs-to-release-software-faster","content:en-us:blog:best-practices-leading-orgs-to-release-software-faster.yml","Best Practices Leading Orgs To Release Software Faster","en-us/blog/best-practices-leading-orgs-to-release-software-faster.yml","en-us/blog/best-practices-leading-orgs-to-release-software-faster",{"_path":1277,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1278,"content":1284,"config":1290,"_id":1292,"_type":14,"title":1293,"_source":16,"_file":1294,"_stem":1295,"_extension":19},"/en-us/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium",{"title":1279,"description":1280,"ogTitle":1279,"ogDescription":1280,"noIndex":6,"ogImage":1281,"ogUrl":1282,"ogSiteName":685,"ogType":686,"canonicalUrls":1282,"schema":1283},"Betstudios CTO on improving CI/CD capabilities with GitLab Premium","Read why Betstudios upgraded to GitLab Premium and how their software development process has changed in this Q&A with their CTO Rafael Campuzano.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Betstudios CTO on improving CI/CD capabilities with GitLab Premium\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristina Weis\"}],\n        \"datePublished\": \"2023-12-06\",\n      
}",{"title":1279,"description":1280,"authors":1285,"heroImage":1281,"date":1286,"body":1287,"category":1288,"tags":1289},[1265],"2023-12-06","Since joining [Betstudios](https://betstudios.com/en/) (soon to be part of WA. Technology Group) as their CTO earlier this year, Rafael Campuzano has been focused on finding ways to help the software development team save time, automate manual processes, and have a better experience.\n\nThe team had been using GitLab’s free tier for source code management and version control for several years, but they wanted to improve their CI/CD capabilities so they made the decision to upgrade to GitLab Premium. We talked with Rafael about what the team has already been able to do, how their software development process has changed, and what they’re planning to do next.\n\n__What led you to upgrade from GitLab’s free tier to GitLab Premium?__\n\nThe main motivation when I joined Betstudios was to have the service hosted outside our office servers and remove the hassle of having to manage it ourselves, so we decided to move to the SaaS model. Once that decision was made, we needed a certain level of reliability but mainly we wanted to improve our [CI/CD capabilities](https://about.gitlab.com/topics/ci-cd/), which was the reason for upgrading to the Premium plan.\n\n__What made you choose GitLab?__\n\nBetstudios was already using GitLab and I had experience from previous companies and knew that GitLab is a quite complete tool for managing code and beyond. 
Besides, I also liked the openness and the overall culture of the company, based on transparency and innovation.\n\n__How has your software development process changed since adopting GitLab Premium?__\n\nWe’re a small team and haven’t been able to take full advantage of all the power that GitLab Premium brings yet, but we’ve started with some deployment automations and that’s taken away a lot of unnecessary work — and mistakes — from the teams.\n\n__What benefits have you seen since you started using GitLab Premium?__\n\nWe’re just getting started, but we have already saved around five hours per week per Team Lead by automating much of the deployment process, and soon we expect to take that to 10 hours per week of time saved. Now they can dedicate this time to better planning, code reviews, and even coding.\n\n__You also moved from GitLab's free tier to GitLab Premium at your last company. How did that experience affect your decision to upgrade to GitLab Premium at Betstudios?__\n\nI was responsible for infrastructure at [EveryMatrix](https://about.gitlab.com/customers/everymatrix/), which included the support of the GitLab servers on-prem. We needed HA (high availability), so we decided to move to the Premium plan. However, we soon realized we made many development teams happy, because they wanted to use the more advanced features that the Premium plan was coming with. The development teams showed it was a great decision for them, and our relationship with GitLab the company was always great, so when I arrived at Betstudios it was a clear move to make.\n\n__What would you like to do next with GitLab’s DevSecOps Platform?__\n\nI would like to explore the CI/CD capabilities with Kubernetes, and I’m encouraging our Engineering teams to use all the capabilities like code reviews, advanced merge requests, and CI/CD more and more. 
I’ve also seen that many companies are using ArgoCD in combination with GitLab and I’m going to explore if there is a way to do everything just using GitLab.\n\n__Do you have any advice for teams getting started with GitLab?__\n\nIt is an all-in-one platform, so you do not need to build complex systems for different tasks around your code production, methodologies, and operations. GitLab is easy to use and has a great team that will help you to get the most out of it.\n\n> Read more GitLab customer stories on our [customers page](https://about.gitlab.com/customers/).","customer-stories",[696,495,9,720],{"slug":1291,"featured":6,"template":700},"betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium","content:en-us:blog:betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium.yml","Betstudios Cto On Improving Ci Cd Capabilities With Gitlab Premium","en-us/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium.yml","en-us/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium",{"_path":1297,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1298,"content":1304,"config":1309,"_id":1311,"_type":14,"title":1312,"_source":16,"_file":1313,"_stem":1314,"_extension":19},"/en-us/blog/better-devops-with-gitlab-ci-cd",{"title":1299,"description":1300,"ogTitle":1299,"ogDescription":1300,"noIndex":6,"ogImage":1301,"ogUrl":1302,"ogSiteName":685,"ogType":686,"canonicalUrls":1302,"schema":1303},"Unlock better DevOps with GitLab CI/CD","Why a single application helps to eliminate silos and knowledge gaps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670652/Blog/Hero%20Images/dev-to-devops-cover.png","https://about.gitlab.com/blog/better-devops-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unlock better DevOps with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie 
Buchanan\"}],\n        \"datePublished\": \"2019-10-18\",\n      }",{"title":1299,"description":1300,"authors":1305,"heroImage":1301,"date":1306,"body":1307,"category":1040,"tags":1308},[715],"2019-10-18","\nWe’ve talked about how the [seamless collaboration between Development and IT operations is a beautiful thing](/topics/devops/build-a-devops-team/). When an organization has a healthy DevOps culture, they’re able to meet business objectives and increase delivery speed. DevOps is meant to eliminate silos so everyone can get on the same page, and the tools you use can play a big role in just how successful, or unsuccessful, your DevOps strategy is.\n\n## Complicated tools create silos\n\nOne of the ways that operations can be at a disadvantage is by having to maintain a [complicated plug-in environment](/blog/plugin-instability/). This scenario becomes especially problematic when things go wrong and developers are relying on a specific group to fix the problem. While specialization isn’t necessarily a bad thing (devs shouldn’t have to do ops, and vice versa), usually the expertise needed to manage a plugin environment is a specialization within an already specialized group.\n\nJenkins is the most popular example of this kind of complexity, for a few reasons:\n\n*   **Jenkins architecture requires maintaining a large set of build environment systems**: At scale, this requires many dedicated people to manage machines, install and manage build tools (NodeJS, Python, Java, et al.), monitor machines, etc.\n\n*   **Upgrading is a risk (Jenkins or plug-ins)**: There is a good chance that upgrades can cause processes to fail, leading to broken builds or downtime.\n\n*   **Groovy is hard to maintain**: This isn't a widely popular script language, so it is harder to find experts to manage it and it's hard to debug due to a lack of debuggers.\n\n*   **Jenkins does not support any kind of clustering or failover**: The web UI is run on a web container known as Jenkins master, 
and you can only have one. For a large team of developers needing to use Jenkins all at once, that one instance needs to be very closely monitored with limited permissions.\n\nA large Jenkins plug-in environment creates silos within silos and knowledge gaps that are hard to overcome. What this leads to is a “throw it over the wall” team dynamic: Because the system depends on the expertise of a very limited number of people, developers have to submit code and hope their experts have the skills to manage it.\n\n## Lack of visibility keeps teams in the dark\n\nIn order for [DevOps](/topics/devops/) to thrive there needs to be an understanding of what every team is doing and clarity around processes. Unfortunately, a tool like Jenkins doesn’t necessarily facilitate this. Because users can’t see other users’ commits, they can’t visualize the SDLC as a whole. This only isolates teams even further.\n\nTeams that work within this plug-in environment often download the plug-ins they need, which makes it hard for Jenkins admins to standardize across teams. That, in turn, makes it harder for admins to manage the dependencies and maintain plug-ins properly, which can lead to more broken builds.\n\nWhile plug-ins are a common way to add functionality into a toolchain, it doesn’t address the problems of a toolchain that hinder teams trying to implement DevOps:\n\n*   Lack of visibility\n*   Knowledge gaps\n*   Work silos\n\n## Why single application CI/CD makes better DevOps\n\nAs a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, we provide a tool that covers all parts of the SDLC from one interface. 
CI and CD are just one part of the lifecycle, and by having functionality like [SCM, Issue tracking, Security testing, and Monitoring](/solutions/jenkins/) built right in, we’re making it easier for teams to work with DevOps best practices.\n\nIf you would like to see a demo of GitLab CI/CD and how we compare to Jenkins, and access other curated content around CI/CD, you can watch our most recent webcast.\n\n[Watch the demo.](/blog/migrating-from-jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,875,721],{"slug":1310,"featured":6,"template":700},"better-devops-with-gitlab-ci-cd","content:en-us:blog:better-devops-with-gitlab-ci-cd.yml","Better Devops With Gitlab Ci Cd","en-us/blog/better-devops-with-gitlab-ci-cd.yml","en-us/blog/better-devops-with-gitlab-ci-cd",{"_path":1316,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1317,"content":1322,"config":1328,"_id":1330,"_type":14,"title":1331,"_source":16,"_file":1332,"_stem":1333,"_extension":19},"/en-us/blog/beyond-application-modernization-trends",{"title":1318,"description":1319,"ogTitle":1318,"ogDescription":1319,"noIndex":6,"ogImage":951,"ogUrl":1320,"ogSiteName":685,"ogType":686,"canonicalUrls":1320,"schema":1321},"Beyond trends: Committing to application modernization","How to overcome analysis paralysis and take your digital transformation efforts from theory to practice.","https://about.gitlab.com/blog/beyond-application-modernization-trends","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Beyond trends: Committing to application modernization\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2019-02-25\",\n      }",{"title":1318,"description":1319,"authors":1323,"heroImage":951,"date":1325,"body":1326,"category":1040,"tags":1327},[1324],"Erica Lindberg","2019-02-25","\n\nJust commit. What’s so hard about that? 
In truth, there’s a reason why commitment phobia is a punchline and it’s tough to settle on a place to go to dinner, let alone make a critical choice like when or [how to start the application modernization process](/blog/application-modernization-best-practices/).\n\nFor starters, there are so many questions to ask. For example:\n\n  1. What is the status quo of each software initiative?\n  1. Which applications are driving value for the business? Which aren’t?\n  1. When and how should I break my monolith into microservices? What’s the risk?\n  1. Should I move to the cloud – private, public, hybrid?\n  1. Everyone is talking about containers and Kubernetes, do I need this?\n\nThis is by no means an exhaustive list, but a sample of what might come up when considering where and how to start a digital transformation journey. Questions, buzzwords, and trends abound, and it can be easy to get trapped by analysis paralysis until enough time has gone by that indecision has become the decision.\n\nAccording to [Forrester’s Predictions 2019](https://go.forrester.com/blogs/tag/predictions-2019/), 25 percent of firms will decelerate digital efforts in 2019. For many organizations, slowing the pace of innovation directly results in lost market share due to more nimble competitors entering their space.\n\n> “In 2019, digital transformation moves from super-wide enterprise efforts to a pragmatic, surgical portfolio view of digital investments with the goal of making incremental and necessary changes to operations. – Forrester Predictions 2019\n\nThe key to starting and committing to the application modernization process is to start small and scale up as you learn. Following trends is not going to bring the organizational change needed for a successful digital transformation. It takes practical, incremental, and iterative progress.\n\nHere are a few practical steps for getting started:\n\n## 1. 
Start small with a small team or innovation group and scale up from there.\n\nTrying to make a decision on how to proceed with digital transformation across your entire organization is a monumental task. You risk introducing a lot of variable change all at once that can turn chaotic if not managed well. Starting with a small team or innovation group reduces the stress and minimizes the initial impact of getting started. [Behavioral science experts call this the “pick one and go” method](https://bsci21.org/9-tips-to-avoid-paralysis-by-analysis/) for overcoming analysis paralysis. Essentially, if you are overwhelmed or unsure about all of your options, just pick one and try it. Collect feedback, evaluate the outcome, iterate, and scale up from there.\n\nWhen choosing a team or developing an innovation group, avoid thinking along legacy lines which divide teams by stages of the software lifecycle. Think about building a cross-functional team of 8–12 people who can focus on developing the culture, process, and tools needed to continuously deliver software.\n\n## 2. Make smaller changes.\n\nKeep in mind that the impetus for digital transformation and, more specifically, application modernization, is driven from a business need to deliver value to customers faster. So, making smaller changes to release faster is the single most important change you can make.\n\nAdopt the mindset: what is the smallest possible change I can make to improve something, and how do I get it out as quickly as possible? At GitLab, we call this the [minimally viable change (MVC)](https://handbook.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc), and it’s what allows us to ship nearly anything within a single release. This is especially important when approaching legacy software. 
If you start making a ton of big changes over a few weeks, the risk of breaking something and not understanding what change caused the error grows exponentially with every change.\n\nWith an MVC mindset, you can experiment with what works best without risking downtime. Smaller changes are easier to review, understand, and roll back if necessary.\n\n## 3. Prioritize mastering continuous delivery and deployment (CD).\n\nYou have your team assembled, you’ve made MVC your mantra, and now it’s time to establish a clear goal. If you’re just [starting down the application modernization road](/blog/application-modernization-examples/), chances are that you don’t quite know what strategy is going to work for your organization yet (that’s what the innovation group is for!). What you do know is that you need to be able to ship features to production faster while maintaining stability and security. By prioritizing understanding your current deployment pipeline and how to [automate to achieve continuous delivery](/topics/continuous-delivery/), you discover how the underlying infrastructure needs to change.\n\nAuthor Gary Gruver outlines this philosophy in his book, [\"Starting and Scaling DevOps in the Enterprise\"](/resources/scaling-enterprise-devops/). He writes:\n\n> It is my personal experience that creating, documenting, automating, and optimizing deployment pipelines in large software/IT organizations is key to improving their efficiency and effectiveness. – Gary Gruver\n\nStart with a single application and document how a change goes from idea all the way to production and monitoring. This will give you a good understanding of how it’s currently operating, what its dependencies are, and how you can start to decouple.\n\nFinally, the end goal is to enable teams with [fully automated CI/CD pipelines](https://docs.gitlab.com/ee/topics/autodevops/) so developers can get their code to production faster. 
Taking both a cultural and technological approach to change is needed to adopt DevOps methodology.\n\nAre you ready to commit to your digital transformation journey? [Get inspired and learn how Ask Media Group modernized their architecture and development with microservices, containers, and kubernetes](/webcast/cloud-native-transformation/).\n",[999,9,721],{"slug":1329,"featured":6,"template":700},"beyond-application-modernization-trends","content:en-us:blog:beyond-application-modernization-trends.yml","Beyond Application Modernization Trends","en-us/blog/beyond-application-modernization-trends.yml","en-us/blog/beyond-application-modernization-trends",{"_path":1335,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1336,"content":1342,"config":1349,"_id":1351,"_type":14,"title":1352,"_source":16,"_file":1353,"_stem":1354,"_extension":19},"/en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"title":1337,"description":1338,"ogTitle":1337,"ogDescription":1338,"noIndex":6,"ogImage":1339,"ogUrl":1340,"ogSiteName":685,"ogType":686,"canonicalUrls":1340,"schema":1341},"Build an ML app pipeline with GitLab Model Registry using MLflow","Learn how to manage your ML apps entirely through GitLab with this tutorial. 
Also discover the role machine learning operations, or MLOps, plays in automating the DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build an ML app pipeline with GitLab Model Registry using MLflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gufran Yeşilyurt, OBSS\"},{\"@type\":\"Person\",\"name\":\"Péter Bozsó\"}],\n        \"datePublished\": \"2024-09-17\",\n      }",{"title":1337,"description":1338,"authors":1343,"heroImage":1339,"date":1346,"body":1347,"category":849,"tags":1348},[1344,1345],"Gufran Yeşilyurt, OBSS","Péter Bozsó","2024-09-17","__*Editor's note: From time to time, we invite members of our partner\ncommunity to contribute to the GitLab Blog. Thanks to Gufran Yeşilyurt, a\nDevOps consultant at OBSS Technology, for co-creating with us.*__\n\n\nThis tutorial will walk you through setting up an MLOps pipeline with GitLab\nModel Registry, utilizing MLflow. This will be a great starting point to\nmanage your ML apps entirely through GitLab. But first, it is crucial to\nunderstand why we need MLOps and what GitLab offers.\n\n\n[MLOps](https://about.gitlab.com/direction/modelops/mlops/#overview), or\nmachine learning operations, is a critical practice for managing and\nautomating the lifecycle of machine learning models, from development to\ndeployment and maintenance. 
Its importance lies in addressing the complexity\nand dynamism of machine learning workflows, which involve not just software\ndevelopment but also data management, model training, testing, deployment,\nand continuous monitoring.\n\n\nMLOps ensures that models are reproducible, scalable, and maintainable,\nfacilitating collaboration between data scientists, machine learning\nengineers, and operations teams. By incorporating MLOps, organizations can\nstreamline the deployment process, reduce time to market, and improve the\nreliability and performance of their machine learning applications.\n\n\nThe necessity of MLOps arises from the unique challenges posed by machine\nlearning projects. Unlike traditional software development, machine learning\ninvolves handling large datasets, experimenting with various models, and\ncontinuously updating models based on new data and feedback.\n\n\nWithout proper operations, managing these aspects becomes cumbersome,\nleading to potential issues like model drift, where the model's performance\ndegrades over time due to changes in the underlying data. MLOps provides a\nstructured approach to monitor and manage these changes, ensuring that\nmodels remain accurate and effective. Moreover, it introduces automation in\nvarious stages, such as data preprocessing, model training, and deployment,\nthereby reducing manual errors and enhancing efficiency.\n\n\nGitLab's features play a pivotal role in implementing MLOps effectively.\nGitLab provides an integrated platform that combines source code management,\n[CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/), tracking and\ncollaboration tools, making it ideal for managing machine learning projects.\n\n\nWith GitLab, teams can leverage version control to track changes in both\ncode and data, ensuring reproducibility and transparency. 
The CI/CD\npipelines in GitLab automate the testing and deployment of machine learning\nmodels, allowing for continuous integration and continuous delivery. This\nautomation not only speeds up the deployment process but also ensures\nconsistency and reliability in the models being deployed. \n\n\nAdditionally, GitLab's collaboration features, such as merge requests and\ncode reviews, facilitate better communication and coordination among team\nmembers, ensuring that everyone is aligned and any issues are promptly\naddressed.\n\n\nPrerequisites:\n\n- basic knowledge of GitLab pipelines\n\n- basic knowledge of MLflow\n\n- a Kubernetes cluster\n\n- Dockerfile\n\n\nThis tutorial includes instructions to:\n\n- [Set up environment variables of\nMLflow](#set-up-environment-variables-of-mlflow)\n\n- [Train and log candidates at merge\nrequest](#train-and-log-candidates-at-merge-request)\n\n- [Register the most successful\ncandidate](#register-the-most-successful-candidate)\n\n- [Dockerize and deploy an ML app with the registered\nmodel](#dockerize-and-deploy-an-ml-app-with-the-registered-model)\n\n\nIn this example, to decide whether to provide the user a loan, we make use\nof Random Forest Classifier, Decision Tree, and Logistic Regression. At the\nend of this showcase, we will have a web application that utilizes machine\nlearning to respond to the user.\n\n\nTo reproduce this example in your own GitLab environment, you can read the\nrest of this article or follow the video below. 
You can find the source code\nof this example in [these OBSS\nrepositories](https://gitlab.com/gitlab-partners-public/obss).\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/grNJAp1xAi0?si=Bf9CAP9lB1uWErOZ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Set up environment variables of MLflow\n\n\nOn the host where the code is executed, set the environment variables for\ntracking URI and token. This might be a remote host, CI pipeline, or your\nlocal environment. When they are set, you can call\n`mlflow.set_experiment(\"\u003Cexperiment_name>\")`. As a reference:\n\n\n```\n\nexport MLFLOW_TRACKING_URI=\"\u003Cyour gitlab endpoint>/api/v4/projects/\u003Cyour\nproject id>/ml/mlflow\"\n\nexport MLFLOW_TRACKING_TOKEN=\"\u003Cyour_access_token>\"\n\n```\n\n\n**Note:** If the training code contains the call to\n`mlflow.set_tracking_uri()`, remove it.\n\n\n## Train and log candidates at merge request\n\n\nIn your model train code, you can use MLflow methods to log metrics,\nartifacts, and parameters. You can also divide the train steps into pipeline\nstages if you are comfortable with that part. In this example, one Python\nfile will be used for both training and report generation.\n\n\n```\n\nmlflow.log_params(params)\n\nmlflow.log_metrics(metrics_data)\n\nmlflow.log_artifact(artifacts)\n\n```\n\n\nYou can then create the necessary pipeline to train the experiment. 
By\nadding the relevant rules, you can trigger this pipeline manually in merge\nrequests and observe the report generated as MR Note.\n\n\nWhen the pipeline is finished, you can see the details about the candidate\nin **Analyze > Model Experiments**.\n\n\n![details about the candidate in the finished\npipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676127/Blog/Content%20Images/Screenshot_1.png)\n\n\n## Register the most successful candidate\n\n\nAccording to the measurements you have made, we can register the most\nsuccessful candidate (may be the one with the highest accuracy value) with\nthe Run ID of the candidate.\n\n\nBut first, we need to create a model and its version in Registry. I created\nthese steps in separate stages and components (because I may need these\nsteps in other projects). You should be careful to use semantic versioning\nwhen versioning.\n\n\n### Register source model parameters and metrics\n\n\n```\n\nsource_candidate = client.get_run(source_candidate_id)\n\nparams = { k: v for k, v in source_candidate.data.params.items() }\n\nmetric = { k: v for k, v in source_candidate.data.metrics.items() }\n\n\nmodel_version = client.get_model_version(model_name, version)\n\nrun_id = model_version.run_id\n\nmodel_class = \"\"\n\nfor name, value in params.items():\n    client.log_param(run_id, name, value)\n    if name == \"Class\":\n        model_class = value\n\nfor name, value in metric.items():\n    client.log_metric(run_id, name, value)\n\n```\n\n\nAfter logging the parameters and metrics, you can [register the\nartifacts](https://gitlab.com/gitlab-partners-public/obss/mlops-loan-prediction/-/blob/main/register_candidate.py)\nas you did in the train step.\n\n\nYou may want to manually enter the inputs of the relevant steps as [a\nvariable in the\npipeline](https://gitlab.com/gitlab-partners-public/obss/components/-/blob/main/templates/register-candidate.yml).\n\n\n## CI/CD components\n\n\nI have used [CI/CD 
components](https://docs.gitlab.com/ee/ci/components/)\nbecause they provide a structured environment for managing machine learning\nworkflows. These components enable reusability by allowing teams to store\nand share standardized scripts, models, and datasets, ensuring that previous\nwork can be easily accessed, modified, and redeployed in future projects,\nthus accelerating development and reducing redundancy.\n\n\n> [Learn more about CI/CD components and the CI/CD\nCatalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/).\n\n\n## Dockerize and deploy an ML app with the registered model\n\n\nIn this project, while registering the model, I also register the pkl file\nas an artifact and then create the docker image with that artifact and send\nit to [GitLab Container\nRegistry](https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga/).\n\n\nYou can now access your Docker image from the Container Registry and deploy\nit to your environment with the method you want.\n\n\n## Resources\n\n- [Model\nexperiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)\n\n- [MLflow client\ncompatibility](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/mlflow_client.html)\n\n- [CI/CD components](https://docs.gitlab.com/ee/ci/components/)\n\n- [Building GitLab with GitLab: Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n\n***Credits:**\n\nThis tutorial and the corresponding sample projects were created and\ngenerously shared with the community by [OBSS](https://obss.tech/en/). OBSS\nis an EMEA-based channel partner of GitLab. 
They have deep expertise across\nthe whole DevSecOps lifecycle and amongst many other things, they are more\nthan happy to support customers with migrating their MLOps workloads to\nGitLab.*\n",[851,917,9,283],{"slug":1350,"featured":91,"template":700},"build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","content:en-us:blog:build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","Build An Ml App Pipeline With Gitlab Model Registry Using Mlflow","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"_path":1356,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1357,"content":1362,"config":1368,"_id":1370,"_type":14,"title":1371,"_source":16,"_file":1372,"_stem":1373,"_extension":19},"/en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way",{"title":1358,"description":1359,"ogTitle":1358,"ogDescription":1359,"noIndex":6,"ogImage":1339,"ogUrl":1360,"ogSiteName":685,"ogType":686,"canonicalUrls":1360,"schema":1361},"Building a GitLab CI/CD pipeline for a monorepo the easy way","Learn how to create a GitLab CI/CD pipeline for a monorepo to host multiple applications in one repository.","https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building a GitLab CI/CD pipeline for a monorepo the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Morris\"}],\n        \"datePublished\": \"2024-07-30\",\n      }",{"title":1358,"description":1359,"authors":1363,"heroImage":1339,"date":1365,"body":1366,"category":718,"tags":1367},[1364],"Sam Morris","2024-07-30","Monorepos allow you to host multiple applications’ code in a single\nrepository. 
In GitLab, that involves placing disparate application source\ncode in separate directories in one project. While this strategy allows for\nversion controlled storage of your code, it was tricky leveraging the full\npower of GitLab’s [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipeline\ncapabilities… until now!\n\n\n## The ideal case: CI/CD in a monorepo\n\n\nSince you have more than one application’s code living in your repository,\nyou will want to have more than one pipeline configuration. For example, if\nyou have a .NET application and a Spring application in one project, each\napplication may have different build and test jobs to complete. Ideally, you\ncan completely decouple the pipelines and only run each pipeline based on\nchanges to that specific application’s source code.\n\n\nThe technical approach for this would be to have a project-level\n`.gitlab-ci.yml` pipeline configuration file that includes a specific YAML\nfile based on changes in a certain directory. The `.gitlab-ci.yml` pipeline\nserves as the control plane that triggers the appropriate pipeline based on\nthe changes made to the code.\n\n\n## The legacy approach\n\n\nPrior to GitLab 16.4, we were not able to include a YAML file based on\nchanges to a directory or file in a project. However, we could accomplish\nthis functionality via a workaround. \n\n\nIn our monorepo project, we have two directories for different applications.\nIn this example, there are `java` and `python` directories representing a\nJava and Python app, respectively. Each directory has an\napplication-specific YAML file to build each app. 
In the project’s pipeline\nfile, we simply include both application pipeline files, and do the logic\nhandling in those files directly.\n\n\n`.gitlab-ci.yml`:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n  - local: '/python/py.gitlab-ci.yml'\n\n```\n\n\nIn each application-specific pipeline file, we create a hidden job named\n.java-common or .python-common that only runs if there are changes to that\napp’s directory. [Hidden\njobs](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs) do not run by default,\nand are often utilized to reuse specific job configurations. Each pipeline\nextends that hidden job to inherit the rules defining which files to watch\nfor changes, which would then initiate the pipeline job. \n\n\n`j.gitlab-ci.yml`:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\n.java-common:\n  rules:\n    - changes:\n      - '../java/*'\n\njava-build-job:\n  extends: .java-common\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  extends: .java-common\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\n\n`py.gitlab-ci.yml`:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\n.python-common:\n  rules:\n    - changes:\n      - '../python/*'\n\npython-build-job:\n  extends: .python-common\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  extends: .python-common\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\n\nThere are some downsides to this, including having to extend the job for\neach other job in the YAML file to ensure it complies with the rules,\ncreating a lot of redundant code and room for human error. 
Additionally,\nextended jobs cannot have duplicate keys, so you could not define your own\n`rules` logic in each job since there would be a collision in the keys and\ntheir [values are not\nmerged](https://docs.gitlab.com/ee/ci/yaml/index.html#extends). \n\n\nThis results in a pipeline running that includes the j.gitlab-ci.yml jobs\nwhen `java/` is updated, and py.gitlab-ci.yml when `python/` is updated. \n\n\n## The new approach: Conditionally include pipeline files\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6phvk8jioAo?si=y6ztZODvUtM-cHmZ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nIn GitLab 16.4, we introduced [`include` with `rules:changes` for\npipelines](https://docs.gitlab.com/ee/ci/yaml/includes.html#include-with-ruleschanges).\nPreviously, you could `include` with `rules:if`, but not `rules:changes`\nmaking this update extremely powerful. Now, you can simply use the `include`\nkeyword and define the monorepo rules in your project pipeline\nconfiguration. \n\n\nNew `.gitlab-ci.yml`:\n\n\n```\n\nstages:\n  - build\n  - test\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'java/*'\n  - local: '/python/py.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'python/*'\n\n```\n\n\nThen each application’s YAML can just focus on building and testing that\napplication’s code, without extending a hidden job repeatedly. 
This allows\nfor more flexibility in job definitions and reduces code rewriting for\nengineers.\n\n\nNew `j.gitlab-ci.yml`:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\njava-build-job:\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\n\nNew `py.gitlab-ci.yml`:\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\npython-build-job:\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\n\nThis accomplishes the same task of including the Java and Python jobs only\nwhen their directories are modified. Something to consider in your\nimplementation is that [jobs can run unexpectedly when using\n`changes`](https://docs.gitlab.com/ee/ci/jobs/job_troubleshooting.html#jobs-or-pipelines-run-unexpectedly-when-using-changes).\nThe changes rule always evaluates to true when pushing a new branch or a new\ntag to GitLab, so all jobs included will run upon first push to a branch\nregardless of the `rules:changes` definition. You can mitigate this\nexperience by creating your feature branch first and then opening a merge\nrequest to begin your development, since the first push to the branch when\nit is created will force all jobs to run.\n\n\nUltimately, monorepos are a strategy that can be used with GitLab and CI/CD,\nand, with our new `include` with `rules:changes` feature, we have a better\nbest practice for using GitLab CI with monorepos. 
To get started with\nmonorepos, take out a free Gitlab Ultimate trial today.\n\n\n## More CI/CD resources\n\n\n* [5 tips for managing monorepos in\nGitLab](https://about.gitlab.com/blog/tips-for-managing-monorepos-in-gitlab/)\n\n* [How to learn CI/CD\nfast](https://about.gitlab.com/blog/how-to-learn-ci-cd-fast/)\n",[9,917],{"slug":1369,"featured":6,"template":700},"building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way","content:en-us:blog:building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way.yml","Building A Gitlab Ci Cd Pipeline For A Monorepo The Easy Way","en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way.yml","en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way",{"_path":1375,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1376,"content":1382,"config":1388,"_id":1390,"_type":14,"title":1391,"_source":16,"_file":1392,"_stem":1393,"_extension":19},"/en-us/blog/building-build-images",{"title":1377,"description":1378,"ogTitle":1377,"ogDescription":1378,"noIndex":6,"ogImage":1379,"ogUrl":1380,"ogSiteName":685,"ogType":686,"canonicalUrls":1380,"schema":1381},"Getting [meta] with GitLab CI/CD: Building build images","Let's talk about building build images with GitLab CI/CD. 
The power of Docker as a build platform is unleashed when you get meta.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678567/Blog/Hero%20Images/building-blocks.jpg","https://about.gitlab.com/blog/building-build-images","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting [meta] with GitLab CI/CD: Building build images\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2019-08-28\",\n      }",{"title":1377,"description":1378,"authors":1383,"heroImage":1379,"date":1385,"body":1386,"category":718,"tags":1387},[1384],"Brendan O'Leary","2019-08-28","> An alternative title for this post could have been:\n\n>\n\n> I heard you liked Docker, so I put\n[dind](https://hub.docker.com/_/docker/).\n\n\n## Getting started\n\nIt should be clear by now that I love building stuff with GitLab CI/CD. From\n\n[DNS](https://medium.com/gitlab-magazine/ci-cd-all-the-things-pihole-625a0ceaf12)\n\nto [breakfast](/blog/introducing-auto-breakfast-from-gitlab/) GitLab CI/CD\n\noffers a pretty wide range. However, past those \"fun\" use cases, I also like\n\nto share some ~~best~~ practices I have acquired during my years of using\n[GitLab\n\nCI/CD](/solutions/continuous-integration/), both for software and\nnon-software projects alike.\n\n\nI crossed out \"best\" above because I don't really like the term \"best\npractices.\" It\n\nimplies that there is only one right answer to a given question – which is\nthe\n\nopposite of the point of computer science. Sure there are better and worse\nways to\n\ndo something – but like many things in life, you have to find what works for\n\nyou. \"[The best camera is the one you have with\nyou](https://www.amazon.com/Best-Camera-One-Thats-You/dp/0321684788)\"\n\ncomes to mind when building CI/CD for projects. 
Something that works is\nbetter than something that's pretty.\n\n\nBut, enough of my digression, let's get to the practice I wanted to share in\nthis\n\npost: Building build images as part of the build process. Yes, it is\nprecisely as meta as it sounds.\n\n\n## Why?\n\n\nOften when building a particular project, you may have several unique build\ndependencies.\n\nIn many languages, package managers solve for the majority if not all of\nthese\n\ndependencies – at least for build time (think [npm](https://www.npmjs.com),\n[RubyGems](https://rubygems.org/),\n\n[Maven](https://maven.apache.org/what-is-maven.html)). However, when we are\nbuilding and\n\ndeploying (CI/**CD** let's remember) from a machine that is not our own,\nthat may not\n\nbe enough. There may be a few dependencies we might need from elsewhere.\n\n\nThe language libraries themselves are one such dependency – to build Java\nI'm going to need\n\nthe JDK or JRE. To build Node, I'll need... well Node, etc. In a\nDocker-based environment,\n\nthose languages and dependencies typically have an official image on Docker\n\nHub ([JRE from Oracle](https://hub.docker.com/_/oracle-serverjre-8) or\n\n[Node from Node.js](https://hub.docker.com/_/node) for instance). Assume,\nhowever, that\n\nI may need a few other things not included in **either** those official\nDocker images or\n\nthe package manager I'm using. For instance, maybe I need a CLI tool for\n\ndeploy ([AWS](https://aws.amazon.com/cli/),\n[Heroku](https://devcenter.heroku.com/articles/heroku-cli),\n\n[Firebase](https://firebase.google.com/docs/cli), etc.). We also might need\na testing\n\nframework or tool like [Selenium](https://www.seleniumhq.org) or\n\n[headless\nChrome](https://developers.google.com/web/updates/2017/04/headless-chrome).\n\nOr I may need other tools for packaging, testing, or deployment.\n\n\nSometimes there is a Docker image on Docker Hub for these combinations – or\nsome of\n\nthem – but not always a maintained version. 
One easy solution to this could\nbe to\n\njust run the install of the tools before every job that needs it. This can\n\neven be \"automated\" using something like\n\nthe\n[before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script-and-after_script)\nsyntax.\n\nHowever, this adds time to our pipeline and seems inefficient: Is there a\nbetter way?\n\n\n## Enter the GitLab Docker registry\n\nSince GitLab is a single application for the entire\n[DevOps](/topics/devops/) lifecycle – it actually\n\nships out of the box with a built-in\n\n[Docker\nregistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\nThis can be a useful tool when deploying code in a containerized\nenvironment. We can\n\nbuild our application into a container and send it off into Kubernetes or\nsome\n\nother Docker orchestrator.\n\n\nHowever, I also see this registry as an opportunity to save time in my\n\npipeline (and save round trips to Docker hub and back every time). For\nbuilds that require\n\nsome of these extra dependencies, I like to build a \"build\" Docker image.\n\nThat way, I have an image with all of those baked right in. Then, as part of\nmy\n\npipeline, I can build the image at the start (only when changes are made or\nevery time).\n\nAnd the rest of the pipeline can consume that image as the base image.\n\n\n## Putting it in practice\n\nFor example, let's see what it looks like to build a simple Docker image to\nuse with\n\ndeploying to [Google Firebase](https://firebase.google.com/).\n\n\nFirebase is a \"backend as a service\" tool that provides a database,\nauthentication,\n\nand other services across platforms (web, iOS, and Android). It also\nincludes web hosting\n\nand several other items that can be deployed through [a\nCLI](https://firebase.google.com/docs/cli).\n\nThis tool makes getting started really easy. 
You can deploy the whole stack\nwith\n\n`firebase deploy.` Alternatively, you can deploy a part (like\n[serverless](/topics/serverless/) functions)\n\nwith a command like `firebase deploy --only functions.`\n\n\nMaking this work in a CI/CD world requires a few extra steps though. We'll\nneed a Node\n\nDocker image that has the firebase CLI in it, so let's make a simple\nDockerfile to do that.\n\n\n> Putting this Dockerfile in `.meta/Dockerfile`\n\n\n```dockerfile\n\nFROM node:10\n\n\nRUN npm install -g firebase-tools\n\n```\n\n\nNext, I'll add a job to the front of my pipeline.\n\n\n> Added to the front of my `.gitlab-ci.yml`\n\n\n```yaml\n\nmeta-build-image:\n  image: docker:stable\n  services:\n    - docker:dind\n  stage: prepare\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - cd .meta\n    - docker build -t $CI_REGISTRY/group/project/buildimage:latest .\n    - docker push $CI_REGISTRY/group/project/buildimage:latest\n  only:\n    refs:\n      - main\n    changes:\n      - .meta/Dockerfile\n```\n\n\nLet's break down that job:\n\n1. We use the `docker:stable` image and a service of `docker:dind`\n\n1. The stage is my first stage called `prepare`\n\n1. In the script, we login to the GitLab registry with the built-in\nvariables and build the\n\nimage. For more details see the [GitLab documentation for building Docker\nimages](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html).\n\n1. We only run this on `main` and only when the `.meta/Dockerfile` changes.\nThis makes\n\nsure we are specific about when we change the Docker image. 
We could also\nuse the\n\ncommit hash or other methods here to make the image more fungible.\n\n\nNow, in further jobs down the pipeline, I can use the latest build of the\nDocker image like this:\n\n\n```yaml\n\nfirestore:\n  image: registry.gitlab.com/group/project/buildimage\n  stage: deploy 🚢🇮🇹\n  script:\n    - firebase deploy --only firestore\n  only:\n    changes:\n      - .firebase-config/firestore.rules\n      - .firebase-config/firestore.indexes.json\n```\n\n\nIn this job, we only run the job if something about\n\nthe [Firestore](https://firebase.google.com/docs/firestore) (the database\nfrom Firebase)\n\nconfiguration changes. And when it does, we run the `firestore deploy`\ncommand in CI. I\n\nalso added a token for deploy as a [GitLab CI/CD\nvariable](https://docs.gitlab.com/ee/ci/variables/)\n\nbased off the Firebase documentation\n\nfor [using firebase with\nCI](https://firebase.google.com/docs/cli#admin-commands).\n\n\n## Summary\n\nIn the end, this helps speed up pipelines by ensuring that you have a\ncustom-built build\n\nimage that you control. You don't have to rely on unstable or unmaintained\nDocker Hub\n\nimages or even have a Docker Hub account yourself to get started.\n\n\nTo learn more about GitLab CI/CD you can [read the GitLab\nwebsite](/solutions/continuous-integration/)\n\nor the [CI/CD docs](https://docs.gitlab.com/ee/ci/introduction/). 
Also,\nthere's a lot more to\n\nlearn about the [GitLab Docker\nregistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\n\nCover image by [Hack\nCapital](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/search/photos/build?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n\n{: .note}\n",[9,1228,917],{"slug":1389,"featured":6,"template":700},"building-build-images","content:en-us:blog:building-build-images.yml","Building Build Images","en-us/blog/building-build-images.yml","en-us/blog/building-build-images",{"_path":1395,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1396,"content":1402,"config":1409,"_id":1411,"_type":14,"title":1412,"_source":16,"_file":1413,"_stem":1414,"_extension":19},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":1397,"description":1398,"ogTitle":1397,"ogDescription":1398,"noIndex":6,"ogImage":1399,"ogUrl":1400,"ogSiteName":685,"ogType":686,"canonicalUrls":1400,"schema":1401},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance Feick\"},{\"@type\":\"Person\",\"name\":\"Sam 
Wiskow\"}],\n        \"datePublished\": \"2024-09-12\",\n      }",{"title":1397,"description":1398,"authors":1403,"heroImage":1399,"date":1406,"body":1407,"category":718,"tags":1408},[1404,1405],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code\nSuggestions](https://about.gitlab.com/solutions/code-suggestions/) need\nlow-latency response times for a frictionless developer experience. Users\ndon’t want to interrupt their flow and wait for a code suggestion to show\nup. To ensure GitLab Duo can provide the right suggestion at the right time\nand meet high performance standards for critical AI infrastructure, GitLab\nrecently launched our first multi-region service to deliver AI features.\n\n\nIn this article, we will cover the benefits of multi-region services, how we\nbuilt an internal platform codenamed ‘Runway’ for provisioning and deploying\nmulti-region services using GitLab features, and the lessons learned\nmigrating to multi-region in production.\n\n\n## Background on the project\n\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning,\ndeploying, and operating containerized services. Runway's purpose is to\nenable GitLab service owners to self-serve infrastructure needs with\nproduction readiness out of the box, so application developers can focus on\nproviding value to customers. 
As part of [our corporate value of\ndogfooding](https://handbook.gitlab.com/handbook/values/#results), the first\niteration was built in 2023 by the Infrastructure department on top of core\nGitLab capabilities, such as continuous integration/continuous delivery\n([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and\ndeployments.\n\n\nBy establishing automated GitOps best practices, Runway services use\ninfrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\n\nGitLab Duo is primarily powered by [AI\nGateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist),\na satellite service written in Python outside of GitLab’s modular monolith\nwritten in Ruby. In cloud computing, a region is a geographical location of\ndata centers operated by cloud providers.\n\n\n## Defining a multi-region strategy\n\n\nDeploying in a single region is a good starting point for most services, but\ncan come with downsides when you are trying to reach a global audience.\nUsers who are geographically far from where your service is deployed may\nexperience different levels of service and responsiveness than those who are\ncloser. This can lead to a poor user experience, even if your service is\nwell built in all other respects.\n\n\nFor AI Gateway, it was important to meet global customers wherever they are\nlocated, whether on GitLab.com or self-managed instances using Cloud\nConnector. When a developer is deciding to accept or reject a code\nsuggestion, milliseconds matter and can define the user experience.\n\n\n### Goals\n\n\nMulti-region deployments require more infrastructure complexity, but for use\ncases where latency is a core component of the user experience, the benefits\noften outweigh the downsides. First, multi-region deployments offer\nincreased responsiveness to the user. By serving requests from locations\nclosest to end users, latency can be significantly reduced. 
Second,\nmulti-region deployments provide greater availability. With fault tolerance,\nservices can fail over during a regional outage. There is a much lower\nchance of a service failing completely, meaning users should not be\ninterrupted even in partial failures.\n\n\nBased on our goals for performance and availability, we used this\nopportunity to create a scalable multi-region strategy in Runway, which is\nbuilt leveraging GitLab features.\n\n\n### Architecture\n\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud\nPlatform (GCP). As a result, Runway’s first supported platform runtime is\nCloud Run. The initial workloads deployed on Runway are stateless satellite\nservices (e.g., AI Gateway), so Cloud Run services are a good fit that\nprovide a clear migration path to more complex and flexible platform\nruntimes, e.g. Kubernetes.\n\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to\niterate and tease out the right level of abstractions for service owners as\npart of a platform play in the Infrastructure department.\n\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region\ndeployment strategy must support global load balancing, and the provisioning\nand configuration of regional resources. Here’s a simplified diagram of the\nproposed architecture in GCP:\n\n\n![simplified diagram of the proposed architecture in\nGCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\n\nBy replicating Cloud Run services across multiple regions and configuring\nthe existing global load balancing with serverless network endpoint group\n(NEG) backends, we’re able to serve traffic from multiple regions. 
For the\nremainder of the article, we’ll focus less on specifics of Cloud Run and\nmore on how we’re building with GitLab.\n\n\n## Building a multi-region platform with GitLab\n\n\nNow that you have context about Runway, let's walk through how to build a\nmulti-region platform using GitLab features.\n\n\n### Provision\n\n\nWhen building an internal platform, the first challenge is provisioning\ninfrastructure for a service. In Runway, Provisioner is the component that\nis responsible for maintaining a service inventory and managing IaC for GCP\nresources using Terraform.\n\n\nTo provision a service, an application developer will open an MR to add a\nservice project to the inventory using git, and Provisioner will create\nrequired resources, such as service accounts and identity and access\nmanagement policies. When building this functionality with GitLab, Runway\nleverages [OpenID Connect (OIDC) with GCP Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/)\nfor managing IaC.\n\n\nAdditionally, Provisioner will create a deployment project for each service\nproject. The purpose of creating separate projects for deployments is to\nensure the [principle of least\nprivilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/)\nby authenticating as a GCP service account with restricted permissions.\nRunway leverages the [Projects\nAPI](https://docs.gitlab.com/ee/api/projects.html) for creating projects\nwith [Terraform\nprovider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\n\nFinally, Provisioner defines variables in the deployment project for the\nservice account, so that deployment CI jobs can authenticate to GCP. 
Runway\nleverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and\n[Job Token\nallowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist)\nto handle authentication and authorization.\n\n\nHere’s a simplified example of provisioning a multi-region service in the\nservice inventory:\n\n\n```\n\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n\n```\n\n\nOnce provisioned, a deployment project and necessary infrastructure will be\ncreated for a service.\n\n\n### Configure\n\n\nAfter a service is provisioned, the next challenge is the configuration for\na service. In Runway,\n[Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl)\nis a component that is responsible for configuring and deploying services by\naligning the actual state with the desired state using Golang and Terraform.\n\n\nHere’s a simplified example of an application developer configuring GitLab\nCI/CD in their service project:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n\n```\n\n\nRunway provides sane default values for configuration that are based on our\nexperience in delivering stable and reliable features to customers.\nAdditionally, service owners can configure infrastructure using a service\nmanifest file hosted in a service project. The service manifest uses JSON\nSchema for validation. 
When building this functionality with GitLab, Runway\nleverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema\ndocumentation.\n\n\nTo deliver this part of the platform, Runway leverages [CI/CD\ntemplates](https://docs.gitlab.com/ee/development/cicd/templates.html),\n[Releases](https://docs.gitlab.com/ee/user/project/releases/), and\n[Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for\nintegrating with service projects.\n\n\nHere’s a simplified example of a service manifest:\n\n\n```\n\n# .runway/runway-production.yml\n\napiVersion: runway/v1\n\nkind: RunwayService\n\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n\n```\n\n\nFor multi-region services, Runway injects an environment variable into the\ncontainer instance runtime, e.g. RUNWAY\\_REGION, so application developers\nhave the context to make any downstream dependencies regionally-aware, e.g.\nVertex AI API.\n\n\nOnce configured, a service project will be integrated with a deployment\nproject.\n\n\n### Deploy\n\n\nAfter a service project is configured, the next challenge is deploying a\nservice. In Runway, Reconciler handles this by triggering a deployment job\nin the deployment project when an MR is merged to the main branch. When\nbuilding this functionality with GitLab, Runway leverages [Trigger\nPipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project\nPipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines)\nto trigger jobs from service project to deployment project.\n\n\n![trigger jobs from service project to deployment\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\n\nOnce a pipeline is running in a deployment project, it will be deployed to\nan environment. 
By default, Runway will provision staging and production\nenvironments for all services. At this point, Reconciler will apply any\nTerraform resource changes for infrastructure. When building this\nfunctionality with GitLab, Runway leverages\n[Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and\n[GitLab-managed Terraform\nstate](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html)\nfor each service.\n\n\n![Reconciler applies any Terraform resource changes for\ninfrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\n\nRunway provides default application metrics for services. Additionally,\ncustom metrics can be used by enabling a sidecar container with\nOpenTelemetry Collector configured to scrape Prometheus and remote write to\nMimir. By providing observability out of the box, Runway is able to bake\nmonitoring into CI/CD pipelines.\n\n\nExample scenarios include gradual rollouts for blue/green deployments,\npreventing promotions to production when staging is broken, or automatically\nrolling back to previous revision when elevated error rates occur in\nproduction.\n\n\n![Runway bakes monitoring into CI/CD\npipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\n\nOnce deployed, environments will serve the latest revision of a service. At\nthis point, you should have a good understanding of some of the challenges\nthat will be encountered, and how to solve them with GitLab features.\n\n\n## Migrating to multi-region in production\n\n\nAfter extending Runway components to support multi-region in Cloud Run, the\nfinal challenge was migrating from AI Gateway’s single-region deployment in\nproduction with zero downtime. 
Today, teams using Runway to deploy their\nservices can self-serve on regions making a multi-region deployment just as\nsimple as a single-region deployment. \n\n\nWe were able to iterate on building multi-region functionality without\nimpacting existing infrastructure by using semantic versioning for Runway.\nNext, we’ll share some learnings from the migration that may inform how to\noperate services for an internal multi-region platform.\n\n\n### Dry run deployments\n\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off\nis that plans cannot be verified in advance, which could risk inadvertently\ndestroying or misconfiguring production infrastructure. To solve this\nproblem, Runway will perform a “dry run” deployment for MRs.\n\n\n![\"Dry run\"\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\n\nFor migrating AI Gateway, dry run deployments increased confidence and\nhelped mitigate risk of downtime during rollout. When building an internal\nplatform with GitLab, we recommend supporting dry run deployments from the\nstart.\n\n\n### Regional observability\n\n\nIn Runway, existing observability was aggregated by assuming a single-region\ndeployment. To solve this problem, Runway observability was retrofitted to\ninclude a new region label for Prometheus metrics.\n\n\nOnce metrics were retrofitted, we were able to introduce service level\nindicators (SLIs) for both regional Cloud Run services and global load\nbalancing. 
Here’s an example dashboard screenshot for a general Runway\nservice:\n\n\n![dashboard screenshot for a general Runway\nservice](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nAdditionally, we were able to update our service level objectives (SLOs) to\nsupport regions. As a result, service owners could be alerted when a\nspecific region experiences an elevated error rate, or increase in response\ntimes.\n\n\n![screenshot of\nalerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n\n***Note:** Data is not actual production data and is only for illustration\npurposes.*\n\n\nFor migrating AI Gateway, regional observability increased confidence and\nhelped provide more visibility into new infrastructure. When building an\ninternal platform with GitLab, we recommend supporting regional\nobservability from the start.\n\n\n### Self-service regions\n\n\nThe Infrastructure department successfully performed the initial migration\nof multi-region support for AI Gateway in production with zero downtime.\nGiven the risk associated with rolling out a large infrastructure migration,\nit was important to ensure the service continued working as expected.\n\n\nShortly afterwards, service owners began self-serving additional regions to\nmeet the growth of customers. At the time of writing, [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) is available in six regions\naround the globe and counting. Service owners are able to configure the\ndesired regions, and Runway will provide guardrails along the way in a\nscalable solution.\n\n\nAdditionally, three other internal services have already started using\nmulti-region functionality on Runway. 
Application developers have entirely\nself-served functionality, which validates that we’ve provided a good\nplatform experience for service owners. For a platform play, a scalable\nsolution like Runway is considered a good outcome since the Infrastructure\ndepartment is no longer a blocker.\n\n\n## What’s next for Runway\n\n\nBased on how quickly we could iterate to provide results for customers, the\nSaaS Platforms department has continued to invest in Runway. We’ve grown the\nRunway team with additional contributors, started evolving the platform\nruntime (e.g. Google Kubernetes Engine), and continue dogfooding with\ntighter integration in the product.\n\n\nIf you’re interested in learning more, feel free to check out\n[https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n\n## More Building GitLab with GitLab\n\n- [Why there is no MLOps without\nDevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n- [Stress-testing Product\nAnalytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n\n- [Web API Fuzz\nTesting](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n\n- [How GitLab.com inspired\nDedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n\n- [Expanding our security certification\nportfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n",[9,786,785,1064,917,828,939,1105,696,851],{"slug":1410,"featured":91,"template":700},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":1416,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1417,"content":1423,"config":1429,"_id":1431,"_type":14,"title":1432,"_source":16,"_file":1433,"_stem":1434,"_extension":19},"/en-us/blog/built-in-ci-cd-version-control-secret",{"title":1418,"description":1419,"ogTitle":1418,"ogDescription":1419,"noIndex":6,"ogImage":1420,"ogUrl":1421,"ogSiteName":685,"ogType":686,"canonicalUrls":1421,"schema":1422},"The market figured out GitLab’s secret","Why we decided to combine version control with CI, and the rise of the single application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/built-in-ci-cd-version-control-secret","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The market figured out GitLab’s secret\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-08-08\",\n      }",{"title":1418,"description":1419,"authors":1424,"heroImage":1420,"date":1426,"body":1427,"category":1040,"tags":1428},[1425],"Sid Sijbrandij","2019-08-08","\n\nThere’s a movement in the DevOps industry and the world right now: to do more in a simple way that inspires us to innovate. GitLab started this trend in the DevOps space by simplifying the delivery of code by combining GitLab CI and [GitLab version control](/topics/version-control/). We didn't originally buy into the idea that this was the right way to do things, but it became our secret capability that we’ve doubled down on.\n\n## Let’s combine applications\n\nThe story starts with [Kamil Trzciński](/company/team/#ayufanpl), now a distinguished engineer at GitLab. 
Soon after Kamil came to work for GitLab full time, he began talking with me and my co-founder, [Dmitriy Zaporozhets](/company/team/#dzaporozhets), suggesting that we bring our two projects together – GitLab Version Control and GitLab CI, making it into one application. Dmitriy didn’t think it was a good idea. GitLab version control and CI were already perfectly integrated with single sign-on and APIs that fit like a glove. He thought that combining them would make GitLab a monolith of an application, that it would be disastrous for our code quality, and an unfortunate user experience. After time though, Dmitriy started to think it was the right idea as it would deliver a seamless experience for developers to deliver code quickly.\n\nAfter Dmitriy was convinced, they came to me. I also didn’t think it was a good idea. At the time I believed we needed to have tools that are composable and that could integrate with other tools, in line with the Unix philosophy. Kamil convinced me to think about the efficiencies of having a single application.\n\n>“Well, if you don’t believe that it’s better for a user, at least believe it’s more efficient for us, because we only have to release one application instead of two. Efficiency is in our values.” - Kamil Trzcinski, distinguished engineer at GitLab\n\n## Realizing the future of DevOps is a single application\n\nThat made sense to me and I no longer stood in their way. The two projects merged and the results were beyond my expectations. The efficiencies that were so appealing to us, also made it appealing to our customers. We realized we stumbled on a big secret because nobody believed that the two combined together would be a better way of continuously delivering code to market. We doubled down on this philosophy and we started doing [continuous delivery](/topics/continuous-delivery/).\n\nFrom that day on, I saw the value of having a single application. For example, a new feature we are implementing is auto-remediation. 
When a vulnerability comes out, say a heart bleed, GitLab will automatically detect where in your codebase that vulnerability exists, update the dependency, and deliver it to your production environment. This level of automation would be hard to implement without being in a single application. By combining the projects we unified teams – helping them realize the original intent of DevOps – and that is magical to see.\n\n## The market validates our secret\n\nAnd while we bet on this philosophy the industry is now seeing it as well. In September of 2015 we [combined GitLab CI and GitLab version control](/releases/2015/09/22/gitlab-8-0-released/) to create a single application. By March of 2017, Bitbucket also realized the advantages of this architecture and [released Pipelines as a built-in part of Bitbucket](https://dzone.com/articles/bitbucket-adds-pipelines). In 2018, [GitHub announced Actions](https://techcrunch.com/2018/10/16/github-launches-actions-its-workflow-automation-tool/) with CI-like functionality built into a single application offering. In the last six months, [JFrog acquired Shippable](https://techcrunch.com/2019/02/21/jfrog-acquires-shippable-adding-continuous-integration-and-delivery-to-its-devops-platform/) and [Idera acquired Travis CI](https://hub.packtpub.com/idera-acquires-travis-ci-the-open-source-continuous-integration-solution/), showing a consolidation of the DevOps market and a focus on CI. The market is validating what we continually hear from our users and customers: that a simple, single DevOps application meets their needs better.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MNxkyLrA5Aw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe hope you will continue to join us in our effort to bring teams together to innovate. 
[Everyone can contribute](/company/mission/#mission) here at GitLab and as always, we value your feedback, thoughts, and contributions.\n\nWant to hear me talk through the origin story? Listen to the [Software Engineering Daily podcast](https://softwareengineeringdaily.com/2019/03/15/gitlab-with-sid-sijbrandij/) where I talk about combining GitLab CI and GitLab Version Control.\n",[9,721],{"slug":1430,"featured":6,"template":700},"built-in-ci-cd-version-control-secret","content:en-us:blog:built-in-ci-cd-version-control-secret.yml","Built In Ci Cd Version Control Secret","en-us/blog/built-in-ci-cd-version-control-secret.yml","en-us/blog/built-in-ci-cd-version-control-secret",{"_path":1436,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1437,"content":1443,"config":1449,"_id":1451,"_type":14,"title":1452,"_source":16,"_file":1453,"_stem":1454,"_extension":19},"/en-us/blog/business-impact-ci-cd",{"title":1438,"description":1439,"ogTitle":1438,"ogDescription":1439,"noIndex":6,"ogImage":1440,"ogUrl":1441,"ogSiteName":685,"ogType":686,"canonicalUrls":1441,"schema":1442},"The business impact of CI/CD","How a good CI/CD strategy generates revenue and keeps developers happy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670016/Blog/Hero%20Images/modernize-cicd.jpg","https://about.gitlab.com/blog/business-impact-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The business impact of CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"},{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-06-21\",\n      }",{"title":1438,"description":1439,"authors":1444,"heroImage":1440,"date":1446,"body":1447,"category":1040,"tags":1448},[715,1445],"William Chia","2019-06-21","\n\n[Continuous integration and delivery](/solutions/continuous-integration/) helps [DevOps](/topics/devops/) teams ship higher quality 
software, faster. But is all [CI/CD](/topics/ci-cd/) created equal? What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. Today, we’ll talk about the revenue impact of a poor or non-existent CI/CD strategy.\n\nIf these problems hit a little too close to home, stay tuned for part three where we dive deeper into what organizations gain when they implement better CI/CD.\n\n## What are the business impacts of bad CI/CD?\n\n### 1. A large portion of IT budget is spent on undifferentiated engineering\n\nOpportunity costs play a much larger role in the development process than we realize. Organizations can only afford so many engineers at one time, and systems that require extensive maintenance means fewer engineers are working on revenue-generating projects. This will lead to slower innovation and slower growth in the long term. Undifferentiated engineering means too many individuals are having to focus on one thing – maintenance.\n\n### 2. Delayed (and even unrealized) revenue\n\nThis is the impact of lost opportunity costs. When there are too many dependencies, too many handoffs, and too many manual tasks, it causes delays between when code is written and when the business gets value from that code. In worst cases, code is written and the business never gets any value from it at all. Code can sit in limbo waiting for others to manually test it, and by the time it’s finally reviewed it’s already irrelevant. The opportunity cost essentially doubles: Engineers were paid to work on code that never deployed, and the business loses out on revenue the code could have generated.\n\n### 3. Lower developer productivity, lower developer happiness, and less reliable software\n\nDowntime = lost revenue. 
To avoid that dreaded downtime, developers are spending time working on infrastructure and configuration, and they’re also not spending that time delivering business logic. In both cases, they’re being less productive and working outside of their core competencies. Developer hiring and retention will inevitably suffer. Uptime and resiliency are also affected because people who aren’t domain experts are put in charge of determining infrastructure. It’s a self-fulfilling prophecy.\n\n## What does it look like if a magic wand were to solve it today?\n\n### 1. More engineers are working on the app instead of maintenance\n\nThe organization has the right amount of developers devoted to driving business value and spends more time on innovation instead of undifferentiated heavy lifting. Less of the budget is spent on activities that don't generate revenue.\n\n### 2. Developers see their code in production quickly\n\nInfrastructure and deployment are [fully automated](https://docs.gitlab.com/ee/topics/autodevops/). Everyone loves to see the output of their work, developers especially, and the business gets to see the benefits of this code right away. Deploying smaller chunks of code is less risky when developers can take advantage of test automation, so they have less overhead and coordination with a QA team forced to test manually.\n\n### 3. Developers are focused on solving business problems\n\nCode is written to be environment and cloud agnostic. Development teams own the uptime of their own services, but they are fully supported by the ops team. Ops owns the infrastructure, dev owns the service, and both teams can work according to their strengths.\n\nSolving these problems doesn’t require waving a wand or any magic at all. Modernizing your architecture and embracing CI/CD is what other companies are doing to release better software, faster. 
When organizations implement CI/CD best practices, they get the added benefit of generating more revenue in the long run.\n\nSo what makes “good” CI/CD? We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,9,875],{"slug":1450,"featured":6,"template":700},"business-impact-ci-cd","content:en-us:blog:business-impact-ci-cd.yml","Business Impact Ci Cd","en-us/blog/business-impact-ci-cd.yml","en-us/blog/business-impact-ci-cd",{"_path":1456,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1457,"content":1463,"config":1469,"_id":1471,"_type":14,"title":1472,"_source":16,"_file":1473,"_stem":1474,"_extension":19},"/en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"title":1458,"description":1459,"ogTitle":1458,"ogDescription":1459,"noIndex":6,"ogImage":1460,"ogUrl":1461,"ogSiteName":685,"ogType":686,"canonicalUrls":1461,"schema":1462},"Can your CI/CD environment support AI-powered DevSecOps? ","Unlock the value of AI-powered software development with a DevSecOps platform capable of supporting CI/CD hyperscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683522/Blog/Hero%20Images/AdobeStock_659839979.jpg","https://about.gitlab.com/blog/can-your-ci-cd-environment-support-ai-powered-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can your CI/CD environment support AI-powered DevSecOps? 
\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2024-01-03\",\n      }",{"title":1458,"description":1459,"authors":1464,"heroImage":1460,"date":1466,"body":1467,"category":849,"tags":1468},[1465],"Darren Eastman","2024-01-03","Our customers are experiencing a significant increase in the efficiency and pace of software development with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities powering your workflow. This will likely correspond to a rise in the two [DORA metrics](https://about.gitlab.com/solutions/value-stream-management/dora/) that measure velocity: deployment frequency and lead time for changes. However, what may not be as obvious is that the age of AI-powered software development has also ushered in a new era of large-scale computing for CI/CD jobs. Organizations will need to learn how to support this CI/CD hyperscale.\n\n## What is CI/CD hyperscale?\n\nAs a quick recap, a [CI/CD](https://about.gitlab.com/topics/ci-cd/) job is a unit of work that is executed on a host computing system each time a developer pushes a code change to a project repository. This core guiding principle has accelerated value creation by software development teams over the past few years. Instead of focusing on the ceremonies of legacy project management, today’s leading software development teams deliver value by adding small increments to a software product regularly – weekly, daily, and even hourly.\n\nThe CI/CD engine supports this modern pattern of software product development by enabling development teams to define automation to continuously build, test, and integrate any new software change. Some of our larger customers who have transformed their software development practices have already attained what we at GitLab have labeled as CI/CD hyperscale. 
That is, they are typically running 3 million or more CI/CD jobs per month.\n\n## How to support AI-fueled CI/CD growth\n\nWith the advent of AI-powered DevSecOps, we hypothesize that, starting in 2024, organizations will see a 2x increase per year in the number of CI/CD jobs run by development teams leveraging AI-assisted features across the software development lifecycle. Starting with 3 million jobs per month as the baseline and assuming a 2% growth rate per month, the chart below illustrates the potential impact in the growth of CI/CD jobs monthly due to efficiency gains with AI-powered DevSecOps.\n\n![cicd hyperscale chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683548/Blog/Content%20Images/image1.png)\n\nSo, what steps can you take to ensure your organization is positioned for success in this hyper-competitive new age of AI-powered DevSecOps? \n\nHere are a few pointers to get you started:\n\n- Analyze whether you have enabled your development teams – the creators of value – with the flexibility to adopt modern patterns in software product development. \n- Inventory the tools you use to support creative work, including project and task management.\n- Inventory the tools you use for software development and DevOps. Are you using multiple types of CI systems across your environment? If so, the next step is to gain a deeper understanding of why that is. \n- Create a plan to migrate all software development teams from multiple point solutions to one DevSecOps platform. \n\nYour software development teams may have built up extensive tooling using various CI systems and point solutions and may question the return on investment of a potentially time-consuming consolidation and migration effort. However, based on our internal data, customers adopting GitLab [realize payback in less than six months](https://about.gitlab.com/blog/why-the-market-is-moving-to-a-platform-approach-to-devsecops/).  
\n\nIf you have already adopted the GitLab AI-powered DevSecOps Platform and are consolidating your platform engineering and software development processes, then you are well on your way to unlocking the value of AI-powered software development and having a solution capable of supporting CI/CD hyperscale. \n\nAs the pioneer of the integrated DevSecOps platform, we have been at the forefront of supporting CI/CD hyperscale for customers running CI/CD workloads on the fully managed GitLab SaaS CI/CD build environment or their own self-managed build infrastructure. That focused investment over the past decade has resulted in the development of the most scalable and flexible CI/CD engine – the core of the GitLab DevSecOps Platform. Look no further than our milestone of [more than 1 billion pipelines run on GitLab's SaaS-based DevSecOps Platform](https://about.gitlab.com/blog/one-billion-pipelines-cicd/). \n\nSo with GitLab CI and GitLab Runner, the ultimate CI/CD execution engine, the GitLab DevSecOps Platform is a solution that provides the foundation to continuously improve and transform your value creation processes while supporting the scale required to meet the competitive demands of the new age of AI.\n\n## Learn how to achieve CI/CD hyperscale\n\nIf you are new to GitLab and are interested in learning how we can help you transform your software development processes, [contact our sales team](https://about.gitlab.com/sales/) to help you with a custom demo and get you going on your adoption of AI-powered DevSecOps.\n",[9,696,851],{"slug":1470,"featured":6,"template":700},"can-your-ci-cd-environment-support-ai-powered-devsecops","content:en-us:blog:can-your-ci-cd-environment-support-ai-powered-devsecops.yml","Can Your Ci Cd Environment Support Ai Powered 
Devsecops","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops.yml","en-us/blog/can-your-ci-cd-environment-support-ai-powered-devsecops",{"_path":1476,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1477,"content":1483,"config":1490,"_id":1492,"_type":14,"title":1493,"_source":16,"_file":1494,"_stem":1495,"_extension":19},"/en-us/blog/cascading-merge-requests-with-gitlab-flow",{"title":1478,"description":1479,"ogTitle":1478,"ogDescription":1479,"noIndex":6,"ogImage":1480,"ogUrl":1481,"ogSiteName":685,"ogType":686,"canonicalUrls":1481,"schema":1482},"How to adopt a cascading merge request strategy with GitLab Flow","This tutorial explains how to consolidate updates in a single branch and propagate them to other branches using ucascade bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679851/Blog/Hero%20Images/cascade.jpg","https://about.gitlab.com/blog/cascading-merge-requests-with-gitlab-flow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to adopt a cascading merge request strategy with GitLab Flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-08-31\",\n      }",{"title":1478,"description":1479,"authors":1484,"heroImage":1480,"date":1486,"body":1487,"category":718,"tags":1488},[1485],"Madou Coulibaly","2023-08-31","Git offers a range of branching strategies and workflows that can be\nutilized to enhance organization, efficiency, and code quality. Employing a\nwell-defined workflow helps foster a successful and streamlined development\nprocess. By implementing the [release branches using GitLab\nFlow](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow),\nyou can effectively handle multiple product releases. 
However, when it comes\nto fixing bugs, it often becomes necessary to apply the fix across various\nstable branches such as `main`,  `stable-1.0`, `stable-1.1`, and\n`stable-2.0`. The process of applying the fix to multiple locations can be\ntime-consuming, as it involves the manual creation of multiple merge\nrequests.\n\n\nBy consolidating updates in a single branch and propagating them to other\nbranches, the cascading merge approach establishes a central source of\ntruth, reducing confusion and maintaining consistency. In this blogpost, we\nwill guide you through setting up this approach for your GitLab project\nusing [ucascade bot](https://github.com/unblu/ucascade).\n\n\n## Getting started\n\nTo get started, you'll need the following prerequisites:\n\n\n### Environment\n  - a GitLab project that implemented [Release Branches Strategy](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow)\n  - a Kubernetes cluster\n\n### CLI\n  - git\n  - kubectl\n  - docker\n\n### Project access tokens\n\nFollow the instructions on the [Project access tokens\npage](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token)\nto create two project access tokens –`ucascade` and `ucascade-approver` –\nwith the API scope in your GitLab project.\n\n\n![project access\ntokens](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/pat.png){:\n.shadow.medium}\n\n\n## Deploy ucascade bot on Kubernetes\n\nFirst, create the `bots-fleet` namespace on Kubernetes.\n\n\n```\n\nkubectl create namespace bots-fleet\n\n```\n\n\nThen, create the `cascading-merge-secret` secret that contains the GitLab\nproject access tokens created previously.\n\n\n```\n\nkubectl create secret generic cascading-merge-secret -n bots-fleet \\\n\n--from-literal=gitlab-host=https://gitlab.com \\\n\n--from-literal=gitlab-api-token=\u003CUCASCADE_PROJECT_ACCESS_TOKEN> 
\\\n\n--from-literal=gitlab-api-token-approver=\u003CAPPROVER_BOT_PROJECT_ACCESS_TOKEN>\n\n```\n\n\nOnce done, (fork and) clone the [Cascading Merge\nrepository](https://gitlab.com/madou-stories/bots-fleet/cascading-merge)\nthat contains the Kubernetes manifests for the bot and replace the `host`\nfield in the `kube/ingress.yaml` file according to your Kubernetes domain.\n\n\n```yaml\n\napiVersion: networking.k8s.io/v1\n\nkind: Ingress\n\nmetadata:\n  annotations:\n    kubernetes.io/ingress.class: nginx\n  name: ucascade\n  namespace: bots-fleet\nspec:\n  rules:\n  - host: ucascade.\u003CKUBERNETES_BASED_DOMAIN>\n    http:\n      paths:\n      - backend:\n          service:\n            name: ucascade\n            port:\n              number: 80\n        path: /\n        pathType: Prefix\n\n``` \n\n\nNow, you are ready to deploy the `ucascade` bot.\n\n\n```\n\nkubectl apply -f kube/\n\n```\n\n\nYou should see the following resources deployed on Kubernetes:\n\n\n![ucascade-k8s](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/ucascade-k8s.png){:\n.shadow.medium}\n\n\n**Note:** The `ucascade` image is based on the\n[ucascade-bot](https://github.com/unblu/ucascade-bot) and is located in the\n[Container\nRegistry](https://gitlab.com/madou-stories/bots-fleet/cascading-merge/container_registry)\nof the Cascading Merge repository.\n\n{: .note}\n\n\n## Create a GitLab webhook\n\nFollow the instructions on [the Webhooks\npage](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#configure-a-webhook-in-gitlab)\nto create a webhook with the following variables: \n  - **URL**: `\u003CUCASCADE_INGRESS_URL>/ucascade/merge-request`\n  - **Trigger**: `Merge request events`\n\n![webhook](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/webhook.png){:\n.shadow.medium}\n\n\n## Configure your Cascading Merge rule\n\nCreate a file called ucascade.json at the root level of your 
GitLab project\nas defined in [configuration\nfile](https://unblu.github.io/ucascade/tech-docs/11_ucascade-configuration-file.html#_configuration_file)\nand matched with your release definition.\n\n\n![configuration](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/configuration.png){:\n.shadow.medium}\n\n\n## Testing the Cascading Merge\n\nNow create a branch and an MR from your default branch, make a change, and\nmerge it. The ucascade bot will propagate the change to all other release\nbranches by automatically creating cascading MRs. The following video\ndemonstrates the process:\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ej7xf8axWMs\" title=\"Cascading Merge Approach\"\n  frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n# Additional resources\n\nFind more information about the `ucascade` bot in the [ucascade\ndocumentation](https://unblu.github.io/ucascade/index.html).\n\n\n_Special thank you to Jérémie Bresson for authoring and open sourcing this\namazing bot!_\n",[9,1489,1105,917],"code review",{"slug":1491,"featured":91,"template":700},"cascading-merge-requests-with-gitlab-flow","content:en-us:blog:cascading-merge-requests-with-gitlab-flow.yml","Cascading Merge Requests With Gitlab Flow","en-us/blog/cascading-merge-requests-with-gitlab-flow.yml","en-us/blog/cascading-merge-requests-with-gitlab-flow",{"_path":1497,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1498,"content":1504,"config":1510,"_id":1512,"_type":14,"title":1513,"_source":16,"_file":1514,"_stem":1515,"_extension":19},"/en-us/blog/cd-unified-monitor-deploy",{"title":1499,"description":1500,"ogTitle":1499,"ogDescription":1500,"noIndex":6,"ogImage":1501,"ogUrl":1502,"ogSiteName":685,"ogType":686,"canonicalUrls":1502,"schema":1503},"GitLab's unifiied and integrated monitoring strategies","Learn about GitLab’s unified and integrated monitoring capabilities and 
advanced deployment strategies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681771/Blog/Hero%20Images/CD-1st-mkt-diff-cover-1275x849.jpg","https://about.gitlab.com/blog/cd-unified-monitor-deploy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unifiied and integrated monitoring strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-11-23\",\n      }",{"title":1499,"description":1500,"authors":1505,"heroImage":1501,"date":1507,"body":1508,"category":978,"tags":1509},[1506],"Cesar Saavedra","2020-11-23","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA well integrated and consistent approach to monitoring what is running in production and how it is running can provide not only useful information about the infrastructure and applications but also a feedback loop about how your end users are utilizing your business applications. The ability to visualize what goes into production, what to deploy to production, and who to deploy it to can provide organizations the data to help them select and prioritize capabilities that matter to their customers. In addition, the ability to monitor performance and tracing of deployments allows them to preempt production problems, quickly troubleshoot issues and rollback a release, if needed.\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. 
It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\n\nLet’s first delve into how GitLab provides the capabilities to quickly release, identify production problems and quickly roll back.\n\nFor a release manager, the Environment Dashboard provides a cross-project environment-based view with the big picture of what is going on in each environment:\n\n![environment dashboard](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Env-dashboard.png){: .shadow.medium.center.wrap-text}\n\nThe Environment Dashboard also gives easy access to the CD pipeline. In the picture above, clicking on the “blocked” link takes you to the CD pipeline view:\n\n![CD pipeline](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline.png){: .shadow.medium.center.wrap-text}\n\nFrom the CD pipeline, a release manager can perform a canary deployment and also roll out to production incrementally, for example. 
The performance job above runs web browser performance tests and determines any degradation or improvement in the measurements and reports them as shown below:\n\n![webperf errors](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Review-webperf-errors.png){: .shadow.medium.center.wrap-text}\n\nA release manager can take this information into consideration to determine whether or not these errors warrant a rollback of the release from production.\n\n![rollback button](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/Rollback-click.png){: .shadow.medium.center.wrap-text}\n\nFrom the production environment window, depicted above, clicking on the rollback environment button, will reset the production to its previous working state.\n\nIT teams often run into issues when building and releasing software and without direct user feedback, they often build out too many features, many of which go unused. Without the ability to test in production, IT organizations spend more time on testing, prolonging release cycles, but quality is only marginally improved. Modern IT teams can overcome these issues by using experimentation systems capabilities, such as feature flags and canary deployments.\n\n![feature flags screen](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/ff-screen.png){: .shadow.medium.center.wrap-text}\n\nGitLab supports Feature Flags as shown above. In the example, the defined feature flag named “prods-in-alpha-order-ff” has three strategies:\n\n- For the production environment: provide the feature to 50% of users based on the availability of their IDs\n\n- For the staging environment: provide the feature to the users listed in the user list “prods-in-alpha-order-user-list”\n\n- For the review environment: provide the feature to only one user.\n\nFeature Flags can also be combined with canary deployments. 
For example, in the picture below, the release manager has chosen to release the canary to half of the nodes in production:\n\n![50 percent rollout](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/CD-pipeline-50-percent.png){: .shadow.medium.center.wrap-text}\n\nAnd this combined deployment can be visualized via the deploy board as follows:\n\n![deploy board](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/deploy-board.png){: .shadow.medium.center.wrap-text}\n\nAbove, production has four nodes, two of which are running the new canary deployment, and the other two are still running the current production deployment.\n \nThe combination of canary deployments and feature flags can help gather direct users’ feedback to determine what features are relevant to them, so that an IT organization can focus on these, to shorten release cycle times and deliver higher quality and differentiating value to their users.\n\nLastly, integrated monitoring plays an important role in the feedback loop for these advanced deployment strategies and experimentation systems. With GitLab’s unified and integrated monitoring, you can track system and application metrics cluster-wide as well as per pod.\n\n![clusterwide monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/clusterwide-monitoring.png){: .shadow.medium.center.wrap-text}\n\nIn the picture above, you can see the dashboards that monitor clusterwide metrics. And the picture below shows the dashboards that monitor pod-specific metrics:\n\n![podspecific monitoring](https://about.gitlab.com/images/blogimages/cd-unified-monitor-deploy/podspecific-monitoring.png){: .shadow.medium.center.wrap-text}\n\nGitLab provides the ability to monitor the performance of a deployment and easily rollback if needed. 
It also empowers you to choose what to deploy and who to deploy to in production via Feature Flags as well as advanced deployment strategies, like Canary deployments, in a consistent, repeatable, and uniform manner to help make your releases safe, low risk, and worry-free.\n\nIf you’d like to see some of GitLab’s unified and integrated monitoring capabilities and advanced deployment strategies in action, watch this [video](https://youtu.be/ihdxpO5rgSc).\n\nFor more information, visit [LEARN@GITLAB](https://about.gitlab.com/learn/).\n\n\n",[9,721,896],{"slug":1511,"featured":6,"template":700},"cd-unified-monitor-deploy","content:en-us:blog:cd-unified-monitor-deploy.yml","Cd Unified Monitor Deploy","en-us/blog/cd-unified-monitor-deploy.yml","en-us/blog/cd-unified-monitor-deploy",{"_path":1517,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1518,"content":1524,"config":1531,"_id":1533,"_type":14,"title":1534,"_source":16,"_file":1535,"_stem":1536,"_extension":19},"/en-us/blog/certificate-based-kubernetes-integration-sunsetting-on-gitlab-com",{"title":1519,"description":1520,"ogTitle":1519,"ogDescription":1520,"noIndex":6,"ogImage":1521,"ogUrl":1522,"ogSiteName":685,"ogType":686,"canonicalUrls":1522,"schema":1523},"Certificate-based Kubernetes integration sunsetting on GitLab.com","Learn how to check if you are impacted by the sunsetting in May 2026 and the steps needed to migrate to our proposed alternatives, including the GitLab agent for Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662245/Blog/Hero%20Images/blog-image-template-1800x945__16_.png","https://about.gitlab.com/blog/certificate-based-kubernetes-integration-sunsetting-on-gitlab-com","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Certificate-based Kubernetes integration sunsetting on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        
\"datePublished\": \"2025-02-17\",\n      }",{"title":1519,"description":1520,"authors":1525,"heroImage":1521,"date":1527,"body":1528,"category":693,"tags":1529,"updatedDate":1530},[1526],"Viktor Nagy","2025-02-17","__*Note: In a previously published version of this article, we stated that the certificate-based Kubernetes integration would be sunset in GitLab 18.0 in May 2025. That timeline has been extended to GitLab 19.0, planned for May 2026. See the [deprecation notice](https://docs.gitlab.com/update/deprecations/#gitlab-self-managed-certificate-based-integration-with-kubernetes) for details.*__\n\nThe certificate-based Kubernetes integration was [deprecated in GitLab November 2021](https://about.gitlab.com/blog/deprecating-the-cert-based-kubernetes-integration/), and is available on GitLab.com only to previous users. In May 2026, the integration will sunset on GitLab.com and will stop working. Customers often use the integration to deploy applications to production and non-production environments. As a result, failure to migrate to other options could cause a critical incident in your application delivery pipelines. This post outlines the alternative features that GitLab offers, points out how you can identify the potential impact on your GitLab.com groups and projects, and offers links to the GitLab documentation to learn more about the necessary migration steps.\n\n## Recommended alternative: The GitLab agent for Kubernetes\n\nThe GitLab agent for Kubernetes represents a significant advancement over the certificate-based integration, offering enhanced security, reliability, and functionality. 
Here are the key benefits of migrating to the agent-based approach:\n\n### Enhanced security  \n- Eliminates the need for storing cluster credentials in GitLab  \n- Provides secure, bidirectional communication between GitLab and your clusters  \n- Supports fine-grained access control and authorization policies  \n- Enables secure GitOps workflows with pull-based deployments\n\n### Improved reliability  \n- Maintains persistent connections, reducing deployment failures  \n- Handles network interruptions gracefully  \n- Provides better logging and troubleshooting capabilities  \n- Supports automatic reconnection and state recovery\n\n### Advanced features  \n- Real-time cluster information integrated into the GitLab UI  \n- Integration with GitLab CI/CD pipelines  \n- Support for multiple clusters and multi-tenant environments  \n- Enhanced GitOps capabilities by integrating with FluxCD\n\n## Get started with the GitLab agent for Kubernetes\n\nIf you haven't tried the GitLab Agent for Kubernetes yet, we strongly recommend going through the [getting started guides](https://docs.gitlab.com/ee/user/clusters/agent/getting_started). These guides will walk you through the basic setup and help you understand how the agent works in your environment. The hands-on experience will help make the migration process smoother.\n\n## Impact assessment\n\nWe implemented a [dedicated API](https://docs.gitlab.com/ee/api/cluster_discovery.html) endpoint to query all the certificate-based clusters within a GitLab group hierarchy. We recommend starting with this API to see if you have any clusters that need to be migrated.\n\nOnce you identify the clusters, you should:\n1. Find group and project owners using the certificate-based integration.  \n2. Check CI/CD pipelines for direct Kubernetes API calls.  \n3. Identify Auto DevOps projects using the old integration.  \n4. List any GitLab-managed clusters in use.  \n5. Set up the agent in the affected clusters. \n6. 
Follow the guidance provided in this post and record your progress in a tracking issue.\n\n## Update your CI/CD integration\n\nThe legacy certificate-based integration works using GitLab CI/CD. Because the agent seamlessly integrates with GitLab CI/CD pipelines, you can use it to replace the certificate-based integration with relatively little effort. The agent-based CI/CD integration offers several improvements over the certificate-based approach:\n\n1. **Direct cluster access:** CI/CD jobs can interact with clusters through the agent without requiring separate credentials.  \n2. **Enhanced security:** You don't need to store cluster credentials in CI/CD variables. \n3. **Simplified configuration:** A single agent configuration file manages all cluster interactions.  \n4. **Better performance:** Persistent connections reduce deployment overhead.  \n5. **Flexible authorization:** On GitLab Premium and Ultimate, you can rely on impersonation features to restrict CI/CD jobs in the cluster.\n\nAt a high level, there are three steps to migrating your existing CI/CD pipelines:  \n1. Set up the agent by following [the getting started guides](https://docs.gitlab.com/ee/user/clusters/agent/getting_started).  \n2. [Share the agent connection with the necessary groups and projects.](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html#authorize-the-agent). \n3. [Select the agent in the pipeline jobs.](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html#update-your-gitlab-ciyml-file-to-run-kubectl-commands)\n\nYou can read more about [migrating Kubernetes deployments in general](https://docs.gitlab.com/ee/user/infrastructure/clusters/migrate_to_gitlab_agent.html) or about [the agent CI/CD integration](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) in the documentation.\n\n## Migrate your Auto DevOps configuration\n\nAuto DevOps is a set of CI/CD templates that are often customized by users. 
With Auto DevOps, you can automatically configure your CI/CD pipelines to build, test, and deploy your applications based on best practices. It's commonly used with the certificate-based integration for deploying applications to Kubernetes clusters. \n\nIf you use Auto DevOps and you rely on the certificate-based integration, you need to transition to the agent-based deployment mechanism. The migration process is straightforward:\n1. Set up the CI/CD integration as described above.  \n2. Configure the `KUBE_CONTEXT` environment variable to select an agent.  \n4. Remove the old certificate-based cluster integration.\n\nYou can read more about [using Auto DevOps with the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html\\#environments-that-use-auto-devops) in the documentation.\n\n## Transition from GitLab-managed clusters to GitLab-managed Kubernetes resources\n\nWith GitLab-managed clusters, GitLab automatically creates and manages Kubernetes resources for your projects. When you allow GitLab to manage your cluster, it creates RBAC resources like a Namespace and ServiceAccount. \n\nIf you use GitLab-managed clusters, you should transition to GitLab-managed Kubernetes resources, which offers a more flexible and secure approach to cluster management.\n\nTo migrate: \n1. Document your existing cluster configuration.  \n2. Create corresponding Kubernetes resource definitions.  \n3. Store configurations in your repository.  \n4. Configure the GitLab agent to manage these resources.  \n5. Verify resource management and deployment. \n6. 
Remove the old cluster integration.\n\nYou can read more about [GitLab-managed Kubernetes resources](https://docs.gitlab.com/ee/user/clusters/agent/getting\\_started) in the documentation.\n\n## Manage cloud provider clusters created through GitLab\n\nIf you created Kubernetes clusters through the GitLab integration with Google Kubernetes Engine (GKE) or Amazon Elastic Kubernetes Service (EKS), these clusters were provisioned in your respective cloud provider accounts. After the certificate-based integration is removed:\n1. Your clusters will remain fully operational in Google Cloud or AWS.  \n2. You will need to manage these clusters directly through your cloud provider's console:  \n   - GKE clusters through Google Cloud Console  \n   - EKS clusters through AWS Management Console\n\nTo view cluster information within GitLab:\n 1. Install the GitLab agent for Kubernetes. \n 1. Configure the Kubernetes dashboard integration.  \n 1. Check the dashboard for cluster details and resource information.\n\nThis change only affects how you interact with the clusters through GitLab – it does not impact the clusters' operation or availability in your cloud provider accounts.\n\nYou should still migrate your deployment setups as described above.\n\n## What should I do next?\n\nTo minimize the impact to you and your infrastructure, you should follow these steps:\n1. Check if you are impacted as soon as possible.  \n2. Plan your migration timeline before May 2026.  \n3. Start with non-production environments to gain experience.  \n4. Document your current setup and desired state.  \n5. Test the agent-based approach in a staging environment.  \n6. Gradually migrate production workloads.  \n7. Monitor and validate the new setup.\n\nThe migration to the GitLab agent for Kubernetes represents a significant improvement in how GitLab interacts with Kubernetes clusters. 
While the migration requires careful planning and execution, the benefits in terms of security, reliability, and functionality make it a worthwhile investment for your DevSecOps infrastructure.",[9,1228,693,495],"2025-04-18",{"slug":1532,"featured":6,"template":700},"certificate-based-kubernetes-integration-sunsetting-on-gitlab-com","content:en-us:blog:certificate-based-kubernetes-integration-sunsetting-on-gitlab-com.yml","Certificate Based Kubernetes Integration Sunsetting On Gitlab Com","en-us/blog/certificate-based-kubernetes-integration-sunsetting-on-gitlab-com.yml","en-us/blog/certificate-based-kubernetes-integration-sunsetting-on-gitlab-com",{"_path":1538,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1539,"content":1545,"config":1551,"_id":1553,"_type":14,"title":1554,"_source":16,"_file":1555,"_stem":1556,"_extension":19},"/en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"title":1540,"description":1541,"ogTitle":1540,"ogDescription":1541,"noIndex":6,"ogImage":1542,"ogUrl":1543,"ogSiteName":685,"ogType":686,"canonicalUrls":1543,"schema":1544},"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups","Learn the benefits of managing deploy freezes at the group level and follow step-by-step guidance on implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"}],\n        \"datePublished\": \"2024-02-08\",\n      }",{"title":1540,"description":1541,"authors":1546,"heroImage":1542,"date":1548,"body":1549,"category":741,"tags":1550},[1547],"Christian Nnachi","2024-02-08","In 
the dynamic landscape of continuous integration and continuous deployment ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), maintaining system stability during critical periods such as holidays, product launches, or maintenance windows can be challenging. Introducing new code during peak activity times raises the risk of issues affecting user experience. To strike a balance between innovation and stability, organizations may require a group-level deploy freeze — a strategic pause in deploying new code changes across groups to certain branches or environments.\n\n**Given that GitLab can be used for both continuous integration and continuous deployment efforts, GitLab's [Deploy Freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze)** capability aims to address this exact need.\n\nScoped at the project level, deploy freezes can prevent unintended production releases during a period of time you specify by setting a deploy freeze period. Deploy freezes help reduce uncertainty and risk when continuously deploying changes for a single project.\n\nMost teams, however, do not have a single project that represents all of their production environment. Given that deploy freezes are set at the project level, managing and enforcing deploy freezes across many projects can be an arduous and error-prone task, leading to unpredictability and disruption. The need for an automated cross-project solution to ensure stability is obvious.\n\n## What is a group deploy freeze?\n\nThe [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods) takes the concept of individual project deploy freezes to the next level. 
It enables you to enforce the same deployment restrictions across one or many projects within a GitLab group from the GitLab UI.\n\nWhether you're managing a large suite of microservices or a collection of related projects, a group-managed deploy freeze solution provides a centralized mechanism to maintain stability.\n\n### Benefits of group deploy freeze\n\n**1. Centralized control**\n\nAdherence to your deployment strategy by allowing you to manage deploy freezes for multiple projects from a single location. This simplifies the process and reduces human errors.\n\n**2. Group-wide synchronization**\n\nEnforcing deploy freezes across an entire GitLab group ensures that all projects receive the same schedule at the same time. This maintains uniformity across your projects.\n\n**3. Streamlined collaboration**\n\nVisibility of changes to your development and operations teams can align their efforts effectively.\n\n## How to use GitLab Group Deploy Freeze\n\nWith [Group Deploy Freeze](https://gitlab.com/demos/solutions/group-deploy-freeze), GitLab CI becomes a general-purpose automation tool for ops-related changes, like setting deploy freezes on many projects.\n\nIn the following steps, you will successfully set up the Group Deploy Freeze feature. Remember to test thoroughly and consider any specific nuances of your team's deployment process.\n\n### Prerequisites\n\n- **GitLab account -** You need an active GitLab account with the necessary permissions to access and manage the projects within the target GitLab group.\n- **GitLab Personal Access Token (PAT) -** Generate a GitLab PAT with the permissions to read and write to the projects within the target GitLab group via the GitLab API. This token will be used by the Python script to authenticate API requests.\n- **Python environment -** Ensure that you have a Python environment set up on your machine or the environment where you plan to run the Python script. 
The script is written in Python, so you need a compatible Python interpreter.\n- **Python libraries -** Install the required Python libraries used by the script. These include requests, envparse, and python-gitlab. You can use pip to install these libraries.\n- **GitLab Group details -** Identify the GitLab group for which you want to manage deploy freezes. You'll need the group's slug (path) to specify which group the script will operate on.\n- **Time zone selection -** Decide on the time zone in which you want to schedule the deploy freezes. The time zone selection ensures that freeze periods are accurately timed based on your organization's preferred time zone.\n\n### Getting started\n\nTo use GitLab CI to author and automate the process of batch updating deploy freezes for all projects, fork the [Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods), which will then create a CI/CD pipeline that iterates through your projects and applies the desired deploy freeze schedule. You can customize this project to fit your organization's workflow.\n\nThe provided project contains a `.gitlab-ci.yml` file and a Python script designed to automate the management of deploy freezes for multiple projects within a GitLab group. 
It uses the GitLab API and various Python libraries to create and delete deploy freeze periods, and is designed to be run as part of a CI/CD pipeline to ensure code stability during deployments within a GitLab group.\n\n### Commit and push changes\n\nCommit and push the changes to your repository to trigger the CI/CD pipeline.\n\n### Pipeline execution\n\n- In the [Group Deploy Freeze project](https://gitlab.com/demos/solutions/group-deploy-freeze) on the GitLab UI, go to Pipelines.\n- Select the \"Run Pipeline\" option on the top right corner of the page.\n- You should see the variables defined in the `.gitlab-ci.yml` file like:\n![Set variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-06-at-12-08-48-PM.png)\n- Define the values of the variables `FREEZE_START`, `FREEZE_END`, `CRON_TIME_ZONE` and `GROUP_SLUG`, then run the pipeline. You can define multiple freeze periods by skipping to the next line within the `FREEZE_START` and `FREEZE_END` variables.\n- Once the pipeline is successful, the freeze period should be populated in all projects within the defined groups.\n\n## Monitor and verify\n\n- Verify that these deploy freeze periods are being created and managed as intended.\n- Check your GitLab group's projects for deploy freezes during the specified periods.\n![Monitor and verify](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-12-at-2-08-24-PM.png)\n\n## Customization and iteration\n\n- If needed, iterate on the configuration, script, or pipeline based on your organization's requirements.\n- Make adjustments to freeze periods, time zones, project details, or other settings as needed.\n\nYou can optimize the group deploy feature by following the [Deploy freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze) documentation, which outlines the steps to set up a 
`.freezedeployment` job that can conditionally block deployment jobs upon the presence of the `CI_DEPLOY_FREEZE` variable. By including the `.freezedeployment` template and extending it in your project's `.gitlab-ci.yml file`, you can prevent deployments during freeze periods, ensuring code stability. Manual deployment intervention is possible once the freeze period ends, allowing for controlled and predictable deployment processes across the group's projects.\n\n## Results\n\nBy extending deploy freezes to the group level, teams can easily streamline and enhance their deployment strategies to ensure consistency in preventing unintended production release during a period of time specified by you, whether it is a large company event or holiday. With the power of GitLab's API, CI/CD pipelines, and the flexibility of Python scripting, Group Deploy Freeze is your ally in maintaining code stability and predictability across diverse projects.\n\n> Get started with group deploy freezes today by visiting the [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods).",[9,693,1042,917],{"slug":1552,"featured":6,"template":700},"ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","content:en-us:blog:ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","Ci Cd Automation Maximize Deploy Freeze Impact Across Gitlab 
Groups","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"_path":1558,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1559,"content":1565,"config":1571,"_id":1573,"_type":14,"title":1574,"_source":16,"_file":1575,"_stem":1576,"_extension":19},"/en-us/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch",{"title":1560,"description":1561,"ogTitle":1560,"ogDescription":1561,"noIndex":6,"ogImage":1562,"ogUrl":1563,"ogSiteName":685,"ogType":686,"canonicalUrls":1563,"schema":1564},"CI/CD Catalog goes GA: No more building pipelines from scratch","The CI/CD Catalog becomes generally available in GitLab 17.0. Get to know the capabilities for discovering and sharing pipeline building blocks to help standardize and scale pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098794/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%289%29_DoeBNJVrhv9FpF3WCsHNc_1750098793762.png","https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD Catalog goes GA: No more building pipelines from scratch\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2024-05-08\",\n      }",{"title":1560,"description":1561,"authors":1566,"heroImage":1562,"date":1568,"body":1569,"category":693,"tags":1570},[1567],"Dov Hershkovitch","2024-05-08","GitLab's [CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/#cicd-catalog) becomes generally available in 17.0 (May 16, 2024), enabling all GitLab users to discover, reuse, and contribute CI/CD components easily. 
The CI/CD Catalog boosts collaboration and efficiency when creating pipeline configurations by allowing access to a treasure trove of pre-built components, ready to seamlessly integrate into DevSecOps workflows. Enterprises can use the CI/CD Catalog's centralized platform to standardize workflows across the whole organization.\n\nWith the CI/CD Catalog, GitLab is introducing several key capabilities that are also generally available.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Components and inputs\nThe [CI/CD Catalog](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/) draws its strength from two fundamental features: components and inputs. These capabilities form the backbone of the catalog, enabling developers and DevSecOps teams to streamline their pipeline development. Let’s dive into each of these features:\n\n### Components\n\n#### What are components?\nComponents are reusable, single-purpose building blocks that abstract away the complexity of pipeline configuration. Think of them as Lego pieces for your CI/CD workflows. By using components, you can assemble pipelines more efficiently without starting from scratch each time.\n\n#### Types of components\n- Template-type components: These components resemble CI templates and come with predefined input definitions. They are organized within a specific directory structure, which you can easily plug into your pipelines.\n- CI Steps (upcoming): This new type of component, which is available as an [experimental feature](https://docs.gitlab.com/ee/ci/steps/), will become a first-class object in the CI/CD Catalog, so stay tuned for this exciting addition.\n\n### Inputs\n\n#### What is Inputs Interpolation?\n\nInputs Interpolation is a powerful feature that allows you to define input parameters for includable configuration files. 
By using the [spec: inputs keyword](https://docs.gitlab.com/ee/ci/yaml/#specinputs) within your component configuration, you can dynamically replace almost any keywords within components with parameters. This flexibility extends to adjusting stages, scripts, or job names, supporting various data types making the component fully flexible to your needs.\n\n##### Scoped and effective\nImportantly, inputs are scoped exclusively to the included configuration. This prevents unintended effects on the rest of your pipeline. With Inputs Interpolation, you can declare and enforce constraints seamlessly, ensuring smooth integration of components.\n\nWhether you’re a seasoned DevOps pro or just starting out, the CI/CD Catalog, components, and Inputs Interpolation will transform your pipeline development experience.\n\n## How to access CI/CD Catalog components\nThe CI/CD Catalog is a powerful resource for developers and DevOps teams. It allows you to share and discover pre-built components, streamlining your pipeline development. Here’s how it works:\n\n1. Components are standalone building blocks that simplify pipeline configuration. You can create custom components tailored to your needs. But how do you make them available to others? That’s where the CI/CD Catalog comes in.\n\n2. How to publish to the CI/CD Catalog\n    - To share your components with the community, follow these steps:\n      - Use a simple CI job to publish your component and make it discoverable in the CI/CD Catalog.\n      - Whether it’s a reusable script, a deployment template, or any other pipeline element, the CI/CD Catalog is the perfect place to contribute.\nComponents released to the CI/CD Catalog should be tagged with a [semantic version](https://docs.gitlab.com/ee/ci/components/#semantic-versioning) using three digits.\n    - By sharing your components, you contribute to a growing library of resources that benefit the entire community.\n3. 
Catalog index page\n    - The main page of the CI/CD Catalog (also known as the index page) provides an overview of available projects with published components. Anyone can access the catalog and search for a component that suits their needs.\n    - The index page features two tabs:\n      - All: Displays all component projects that have been published and visible to you.\n      - Your groups: Shows components published within a namespace you’re part of.\n\n![CI/CD Catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098805/Blog/Content%20Images/Blog/Content%20Images/catalog_index_aHR0cHM6_1750098804807.png)\n\n4.  Catalog details page\n\n- Upon clicking on one of the projects in the CI/CD Catalog, you will be redirected to the details page where you can view the available components in that project. \n    - Note that there could be multiple components in a single project.\n\n- The details page features two tabs:\n\u003Ccenter>\u003Ci>Readme: Displays the readme.md of the project that was previously configured by the user.\u003C/i>\u003C/center>\n\n![readme tab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098805/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098804808.png)\n\n\u003Ccenter>\u003Ci>Components: Displays the detailed information for each component such as inputs table syntax to use and more. This information is generated and displayed automatically to help keep it up to date.\u003C/i>\u003C/center>\n\n![components tab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098805/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098804809.png)\n\n## Using a component\n\nTo use a component from the CI/CD Catalog, simply copy the suggested snippet to your pipeline configuration. 
For example: \n\n```yaml\n\ninclude: \n  - component:   gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@0.1.0\n\n```\n\nNote that the snippet contains the fully qualified domain name of the component, so if you moved or clone the component to a different location, you should make sure the FQDN is accurate. You can use the $CI_SERVER_FQDN variable instead of hardcoding the FQDN in your pipeline configuration.\n\nA component can be referenced using the following:\n\n- a commit SHA, for example, e3262fdd0914fa823210cdb79a8c421e2cef79d. We highly recommend using this with $CI_COMMIT_SHA variable in your `.gitlab.ci.yml` file to test a component before publishing it to the CI/CD Catalog.\n- a branch name, for example, main\n- a tag, for example 1.0.0\n- shorthand abbreviation 1.0, which will provide you the latest patched 1.0.x version or 1, which will provide you the latest 1.x.x minor version. This is why it is recommended to use the best practices of semantic versioning and always reference a specific version (minor, major, or a specific patch).\n- ~latest, which always points to the latest semantic version published in the CI/CD Catalog. 
Use ~latest only if you want to use the absolute latest version at all times, which could include breaking changes., so please use it with caution.\n\n## Understanding the CI/CD Catalog across GitLab deployments\nThe CI/CD Catalog and components offer different flavors to cater to various needs and use cases.\n\n### Private and public components\n\n#### Public components\n\n- Public components are hosted in public repositories and are accessible to everyone.\n- When a public component is published from GitLab.com to the main catalog, it becomes discoverable and available for consumption by all users.\n- We encourage users to contribute their best components to the public catalog, helping us build a thriving community.\n\n#### Private components\n\n- Private components are hosted in private repositories.\n- Visibility based on permissions: Users who access the catalog can also see and search for private components if they have permission to view the repository where the component is hosted.\n    - Private catalog option: In GitLab.com, organizations can publish private components to the main catalog in GitLab.com, thereby creating a “private catalog” with content accessible only to authorized users. \n\n### GitLab.com vs. Self-managed\n- The “public” catalog in GitLab.com: The main catalog is the one that is hosted on GitLab.com and can be accessible to anyone by going to [gitlab.com/explore/catalog](http://gitlab.com/explore/catalog). 
The CI/CD Catalog is:\n    - Open access: The catalog hosted on GitLab.com is available for anyone to view.\n    - Contribute and grow: By sharing components, users around the world contribute to a growing library of resources that benefits the entire community.\n\n- Self-managed customers: The CI/CD Catalog is also available for self-managed customers however it has several differences: \n    - Empty catalog: For self-managed customers, the catalog initially appears empty since it doesn't contain any available components.\n    - Organizational catalog: Each organization is responsible for its own catalog, where it can create and maintain its own library of components within this flavor.\n    - Using a component from GitLab.com: If you want to use a component from the main catalog in GitLab.com, clone the project locally and publish it to your organizational catalog. Keep in mind that upstream updates will require mirroring to receive the latest changes. You can learn more about how to do that in our [CI/CD Components documentation](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance).\n\n## What’s next?\n\nThe CI/CD Catalog is only the first step in revolutionizing the way you build and display your available pipelines. Here is a glimpse of what we plan to offer to our users in the upcoming milestones.\n\n### CI Steps\n\nSteps are reusable and composable pieces of a job that can be referenced in your pipeline configuration. Each step defines structured inputs and outputs that can be consumed by other steps. Steps can come from local files, GitLab.com repositories, or any other Git source.\n\nIn GitLab, we think of steps as another type of component. 
We are going to make sure CI Steps will become a first-class object in the CI/CD Catalog, where users can publish, unpublish, search, and consume steps in the same way as they are using components today.\n\n### Securing your catalog workflows\n\nWe aim to empower central administrators to manage component creation, usage, and publication within their organizational catalog. We are committed to ensuring the publishing process seamlessly integrates with the organization's standards and existing workflow. We want to enable the platform administrators with the capabilities to secure and govern the CI/CD Catalog and component workflows. More information can be found in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/12713).\n\n### Analytics\n\nOur goal is to empower users with seamless control over component management across pipelines, ensuring optimal version control and project alignment. This addresses the challenge of users currently lacking visibility into component usage across various project pipelines. Our objective is to provide users with the capability to swiftly identify outdated versions and take prompt corrective actions as needed. This enhancement will foster an environment where users can efficiently manage and update components, promoting both version control precision and project alignment. Read more in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/393326).\n\n## Get started with the CI/CD Catalog\n\nThe introduction of the CI/CD Catalog revolutionizes pipeline development by offering a vast array of pre-built components. Users don't have to start building pipelines from scratch because the CI/CD Catalog provides an access point to search components and pipeline configurations. The CI/CD Catalog's availability makes accessing and sharing components effortless, fostering collaboration and community growth. 
Whether utilizing public or private repositories, users can leverage these resources to enhance their pipeline development experience. Moreover, while GitLab.com users benefit from an open-access catalog, self-managed customers can establish organizational catalogs tailored to their needs.\n\n> [Get to know the CI/CD Catalog](https://about.gitlab.com/free-trial/devsecops/) with a free trial of GitLab Ultimate.\n\n> Learn more about the CI/CD Catalog and components:\n> \n> - [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n>\n> - [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n> \n> - [Introducing CI/CD components and how to use them in GitLab](https://about.gitlab.com/blog/introducing-ci-components/)\n> \n",[9,696,495,695],{"slug":1572,"featured":91,"template":700},"ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch","content:en-us:blog:ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch.yml","Ci Cd Catalog Goes Ga No More Building Pipelines From Scratch","en-us/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch.yml","en-us/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch",{"_path":1578,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1579,"content":1585,"config":1590,"_id":1592,"_type":14,"title":1593,"_source":16,"_file":1594,"_stem":1595,"_extension":19},"/en-us/blog/ci-cd-changing-roles",{"title":1580,"description":1581,"ogTitle":1580,"ogDescription":1581,"noIndex":6,"ogImage":1582,"ogUrl":1583,"ogSiteName":685,"ogType":686,"canonicalUrls":1583,"schema":1584},"A surprising benefit of CI/CD: Changing development roles","DevOps and CI/CD make for faster code release, but they're also causing sweeping changes in dev and ops roles and 
responsibilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668027/Blog/Hero%20Images/cicd.jpg","https://about.gitlab.com/blog/ci-cd-changing-roles","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A surprising benefit of CI/CD: Changing development roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-16\",\n      }",{"title":1580,"description":1581,"authors":1586,"heroImage":1582,"date":1587,"body":1588,"category":1040,"tags":1589},[1037],"2020-07-16","\n\nWhen it comes to [CI/CD](/topics/ci-cd/) and [DevOps](/topics/devops/), the benefits are obvious: Get it right and cleaner code is released (a lot) faster.\n\nBut our [2020 Global DevSecOps Survey](/developer-survey/previous/2020/) found more subtle – and far less talked about – benefits. [CI/CD](https://docs.gitlab.com/ee/ci/) doesn't just allow developers to move faster and do more, it also allows them (and their operations counterparts) **to do less**. The automation required by CI/CD has drastically reduced the manual tasks involved in software development. With fewer time-consuming tasks, Dev and Ops roles and responsibilities are changing, in some cases dramatically.\n\nBut don't just take our word for it. We asked our 2020 survey takers to tell us in their own words how their roles and responsibilities are changing.\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\n## The back story\n\nTo understand the impact of CI/CD and DevOps, it helps to have the full picture. In our 2020 survey 83% of developers said they're releasing code faster than ever before. In fact, nearly 60% of them deploy multiple times a day, once a day, or once every few days (that's 15 percentage points higher than in 2019). 
Just in the last year about 21% of developers said their teams added CI to their process, while just over 15% brought in continuous deployment.\n\nThe benefits of these processes are clear, the developers told us:\n\n\"We've set up automated processes to build, test, and deploy code using a mixture of our own tools and open source tools.\"\n\n\"(We now have) automated tests, automated deployment on code review approval.\"\n\n\"A templatized CI/CD process has significantly sped up build and deploy times to multiple environments in multiple clouds.\"\n\n\"Automated testing using GitLab CI has meant less overhead when reviewing code and quicker and safer deploys.\"\n\n\"Automated testing and continuous integration have made our deployments safer and more optimized. Now everyone in the team has the permission to deploy the code.\"\n\n\"CI and CD tremendously reduced time for build and deploy applications and eliminated problems with the build environment.\"\n\n\"Automation has made one-click testing and deployment possible for us.\"\n\n\"Deployment has become a non-task. Bootstrapping new projects is 10x faster because of the reusable infrastructure.\"\n\n\"We reduced our CI build queue time by 75%, which allowed developers to have test results faster and allows QA to have build artifacts to test faster.\"\n\n\"Automation within the CI/CD pipeline (including test automation and the actual CD automation part) has significantly increased the delivery speed of our team.\"\n\nOne developer shared something that really resonated with us. In the pre-CI/CD world the developer had to submit a ticket to seven different departments before \"button press\" (deployment), a process that used to take six weeks. Now with automation, it takes just two hours.\n\n## Off the list\n\nWith all the changes brought by CI/CD we wondered what developers no longer have to do in order to release code. It's safe to say it was a long list! 
The number one change was no longer needing to do manual testing, followed closely by dropping manual deployments.\n\n\"There's no need to manually merge my code and push to staging and then production.\"\n\n\"(We don't have to) sync the code between multiple Devs – Git does it well.\"\n\n\"(I no longer have to) manually test, argue about code style, and update dependencies.\"\n\n\"We don't have to code our product to work with different platforms. We can just code our product and integrate it with a tool to work with different platforms.\"\n\n\"I never create a ticket to ask Ops to deploy.\"\n\nDevs aren't the only ones not doing things they used to. Operations team members also reported radically changing roles. Nearly 40% said their development lifecyle is \"mostly\" automated, meaning they're free now to tackle different responsibilities. Over half of them are managing cloud services, while 42% said they're now primarily managing hardware and infrastructure.\n\nThis is how they described their roles today:\n\n\"We build out and improve the CI/CD platform.\"\n\n\"I'm a Jack of all trades.\"\n\n\"Right now it's 60% new project work and 40% operations/fire-fighting/developer support.\"\n\n\"We ensure reliability and availability, improve developer efficiency, automation, tools, and observability.\"\n\n\"I keep the lights on.\"\n\n\"(I'm responsible for) anything between dev and ops. From planning to deployment but not monitoring and maintaining apps in production.\"\n\n## Lines are blurring\n\nSo at the end of the day what do these DevOps-driven changes mean for the software development lifecycle? For starters, roles are blurring. Over one-third of developers told us they define and/or create the infrastructure their apps run on and 14% monitor and respond to that infrastructure – both of these tasks were traditionally the responsibility of the operations team. 
In fact, nearly 70% of ops pros said their developers were able to provision their own environments.\n\nDev and ops roles are starting to converge but at the same time developers are doubling down on tasks they consider critical to improving code quality (and thus the speed of code release). Just shy of 50% of developers told us they are now conducting [code reviews](https://docs.gitlab.com/ee/development/code_review.html) weekly but a growing body of anecdotal evidence – based on write-in responses – show that for many teams daily code reviews are a reality, something that would not have been possible if they were bogged down with manual testing and deployments.\n\nGoing forward, the \"free time\" created by [CI/CD automation](https://docs.gitlab.com/ee/topics/autodevops/) won't go to waste, developers told us. A majority want to push their teams to do way more testing of all types (functional, A/B, unit, security) and of course to automate those processes.\n\nWhat should you be doing that you're not doing now?\n\n\"We want to shift left on testing.\"\n\n\"We want to write more test cases to cover 100% of everything.\"\n\n\"We want better code reviews, faster code reviews and more code reviews.\"\n\n\"We should be doing everything better.\"\n\n**Read more about CI/CD and DevOps:**\n\n- Just getting started? 
Get our [CI/CD guide for beginners](/blog/beginner-guide-ci-cd/)\n\n- [The pain (and promise) of code reviews](/blog/beginner-guide-ci-cd/)\n\n- [Why there is never enough testing](/blog/what-blocks-faster-code-release/)\n\nCover image by [Jason Wong](https://unsplash.com/@jasonhk1920) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,721,1269],{"slug":1591,"featured":6,"template":700},"ci-cd-changing-roles","content:en-us:blog:ci-cd-changing-roles.yml","Ci Cd Changing Roles","en-us/blog/ci-cd-changing-roles.yml","en-us/blog/ci-cd-changing-roles",{"_path":1597,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1598,"content":1603,"config":1608,"_id":1610,"_type":14,"title":1611,"_source":16,"_file":1612,"_stem":1613,"_extension":19},"/en-us/blog/ci-cd-github-extended-again",{"title":1599,"description":1600,"ogTitle":1599,"ogDescription":1600,"noIndex":6,"ogImage":1200,"ogUrl":1601,"ogSiteName":685,"ogType":686,"canonicalUrls":1601,"schema":1602},"We're extending free usage of CI/CD for GitHub for another six months!","Get another six months' use of CI/CD for GitHub on GitLab.com, free of charge.","https://about.gitlab.com/blog/ci-cd-github-extended-again","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We're extending free usage of CI/CD for GitHub for another six months!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-09-09\",\n      }",{"title":1599,"description":1600,"authors":1604,"heroImage":1200,"date":1605,"body":1606,"category":300,"tags":1607},[1445],"2019-09-09","\n\n[CI/CD for GitHub](/solutions/github/) allows you to host your code on GitHub while taking advantage of GitLab for CI/CD. 
In fact, [CI/CD for external repos](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) lets you use any Git repo as a host together with GitLab CI/CD.\n\nWhen we first released the ability to use GitLab CI/CD with other Git repositories we placed it into our [Premium tier](/pricing/premium/) for GitLab Self-Managed. Normally, features go into a corresponding pricing tier on GitLab.com but we believed this was a specific case where we should offer a feature for free on GitLab.com because of the amount of repos on GitHub.com. Not knowing how long we'd keep this pricing, we set a deadline of one year. When that time came, we extended for six months.\n\nToday, we are extending the deadline for using CI/CD for external repos, including CI/CD for GitHub again. Now you'll have until **Mar. 22, 2020** to use these capabilities with private repos (see below for open source) as a [Free or Bronze](/pricing/) user on GitLab.com. This feature will continue to be part of the [Premium tier](/pricing/premium/) for GitLab Self-Managed.\n\n## Always free for open source\n\nThis extension applies to private repos hosted on GitLab.com. As part of our commitment to open source, [public projects get all the features of Gold for free](/solutions/open-source/). GitLab CI/CD for GitHub works by automatically mirroring your repos to GitLab.com. As such, if you have a public project on GitHub, it will also be public on GitLab so you can always take advantage of GitLab CI/CD for public projects.\n\n## Why we're extending the offer\n\nFor a rationale on our extension see our previous blog post when we [first extended external CI/CD](/blog/six-more-months-ci-cd-github/). 
When it came to the current deadline we found that the reasoning still held true and [decided to extend again](https://gitlab.com/gitlab-org/gitlab-ee/issues/13065).\n\nAs always, we'd love your feedback in the comments below.\n",[9,721,268,827],{"slug":1609,"featured":6,"template":700},"ci-cd-github-extended-again","content:en-us:blog:ci-cd-github-extended-again.yml","Ci Cd Github Extended Again","en-us/blog/ci-cd-github-extended-again.yml","en-us/blog/ci-cd-github-extended-again",{"_path":1615,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1616,"content":1619,"config":1625,"_id":1627,"_type":14,"title":1628,"_source":16,"_file":1629,"_stem":1630,"_extension":19},"/en-us/blog/ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline",{"noIndex":6,"title":1617,"description":1618},"CI/CD inputs: Secure and preferred method to pass parameters to a pipeline","Learn how CI/CD inputs provide type-safe parameter passing with validation, replacing error-prone variables for more reliable pipelines.",{"title":1617,"description":1618,"authors":1620,"heroImage":1621,"date":1622,"body":1623,"category":693,"tags":1624},[1567],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","2025-07-07","\nGitLab CI/CD inputs represent the future of pipeline parameter passing. As\na purpose-built feature designed specifically for typed parameters with\nvalidation, clear contracts, and enhanced security, inputs solve the\nfundamental challenges that teams have been working around with variables\nfor years.\n\nWhile CI/CD variables have served as the traditional method for passing parameters to pipelines, they were originally designed for storing configuration settings — not as a sophisticated parameter-passing mechanism for complex workflows. 
This fundamental mismatch has created reliability issues, security concerns, and maintenance overhead that inputs elegantly eliminate.\n\nThis article demonstrates why CI/CD inputs should be your preferred approach for pipeline parameters. You'll discover how inputs provide type safety, prevent common pipeline failures, eliminate variable collision issues, and create more maintainable automation. You'll also see practical examples of inputs in action and how they solve real-world challenges, which we hope will encourage you to transition from variable-based workarounds to input-powered reliability.\n\n## The hidden costs of variable-based parameter passing\n\nThe problems with using variables for parameter passing are numerous and frustrating. \n\n**No type validation**\n\nVariables are strings. There is no type validation, meaning a pipeline expecting a boolean or a number may accidentally receive a string. This leads to unexpected failures deep into the pipeline execution. In the case of a deployment workflow, for example, hours after it was started a critical production deployment fails because a boolean check in a variable was not passed as expected.\n\n\n**Runtime mutability**\n\nVariables can be modified throughout the pipeline runtime, creating unpredictable behavior when multiple jobs attempt to change the same values. For example, deploy_job_a sets `DEPLOY_ENV=staging`, but deploy_job_b changes the `DEPLOY_ENV` value to `production`. \n\n\n**Security risks**\n\nSecurity concerns arise because variables intended as simple parameters often receive the same access permissions as sensitive secrets. There's no clear contract defining what parameters a pipeline expects, their types, or their default values. 
A simple `BUILD_TYPE` parameter, that seems innocuous at first glance, suddenly has access to production secrets simply because variables do not inherently distinguish between parameters and sensitive data.\n\n\nPerhaps most problematically, error detection happens too late in the process. A misconfigured variable might not cause a failure until minutes or even hours into a pipeline run, wasting valuable CI/CD resources and developer time. Teams have developed elaborate workarounds such as custom validation scripts, extensive documentation, and complex naming conventions just to make variable-based parameter passing somewhat reliable.\n\nMany users have requested local debugging capabilities to test pipeline configurations before deployment. While this seems like an obvious solution, it quickly breaks down in practice. Enterprise CI/CD workflows integrate with dozens of external systems — cloud providers, artifact repositories, security scanners, deployment targets — that simply can't be replicated locally. Even if they could, the complexity would make local testing environments nearly impossible to maintain. This mismatch forced us to reframe the problem entirely. Instead of asking \"How can we test pipelines locally?\" we started asking \"How can we prevent configuration issues caused by variable-based parameter passing before users run a CI/CD automation workflow?\"\n\n## Understanding variable precedence\n\nGitLab's variable system includes multiple [precedence levels](https://docs.gitlab.com/ci/variables/#cicd-variable-precedence) to provide flexibility for different use cases. 
While this system serves many valid scenarios like allowing administrators to set instance- or group-wide defaults while letting individual projects override them when needed, it can create challenges when building reusable pipeline components.\n\n\nWhen creating components or templates that will be used across different projects and groups, the variable precedence hierarchy can make behavior less predictable. For example, a template that works perfectly in one project might behave differently in another due to group- or instance-level variable overrides that aren't visible in a pipeline configuration.\n\n\nWhen including multiple templates, it also can be challenging to track which variables are being set where and how they might interact.\n\n\nIn addition, components authors need to document not just what variables their template uses, but also potential conflicts with variables that might be defined at higher precedence levels.\n\n\n### Variable precedence examples\n\n\n**Main pipeline file (`.gitlab-ci.yml`):**\n\n\n```yaml\n\nvariables:\n  ENVIRONMENT: production  # Top-level default for all jobs\n  DATABASE_URL: prod-db.example.com\n\ninclude:\n  - local: 'templates/test-template.yml'\n  - local: 'templates/deploy-template.yml'\n```\n\n\n**Test template (`templates/test-template.yml`):**\n\n\n```yaml\n\nrun-tests:\n  variables:\n    ENVIRONMENT: test  # Job-level variable overrides the default\n  script:\n    - echo \"Running tests in $ENVIRONMENT environment\"  \n    - echo \"Database URL is $DATABASE_URL\"  # Still inherits prod-db.example.com!\n    - run-integration-tests --env=$ENVIRONMENT --db=$DATABASE_URL\n    `# Issue: Tests run in \"test\" environment but against production database`\n\n```\n\n\n**Deploy template (`templates/deploy-template.yml`):**\n\n\n``` yaml\n\ndeploy-app:\n  script:\n    - echo \"Deploying to $ENVIRONMENT\"  # Uses production (top-level default)\n    - echo \"Database URL is $DATABASE_URL\"  # Uses prod-db.example.com\n    - 
deploy --target=$ENVIRONMENT --db=$DATABASE_URL\n    # This will deploy to production as intended\n```\n\n**The challenges in this example:**\n\n\n1. Partial inheritance: The test job gets `ENVIRONMENT=test` but still inherits `DATABASE_URL=prod-db.example.com`.  \n\n2. Coordination complexity: Template authors must know what top-level variables exist and might conflict.  \n\n3. Override behavior: Job-level variables with the same name override defaults, but this isn't always obvious.  \n\n4. Hidden dependencies: Templates become dependent on the main pipeline's variable names.\n\n\nGitLab recognized these pain points and introduced [CI/CD inputs](https://docs.gitlab.com/ee/ci/inputs/) as a purpose-built solution for passing parameters to pipelines, offering typed parameters with built-in validation that occurs at pipeline creation time rather than during execution.\n\n\n## CI/CD inputs fundamentals\n\n\nInputs provide typed parameters for reusable pipeline configuration with built-in validation at pipeline creation time, designed specifically for defining values when the pipeline runs. They create a clear contract between the pipeline consumer and the configuration, explicitly defining what parameters are expected, their types, and constraints.\n\n\n### Configuration flexibility and scope\n\n\nOne of the advantages of inputs is their configuration-time flexibility. Inputs are evaluated and interpolated during pipeline creation using the interpolation format `$[[ inputs.input-id ]]`, meaning they can be used anywhere in your pipeline configuration — including job names, rules conditions, images, and any other YAML configuration element. 
This eliminates the long-standing limitation of variable interpolation in certain contexts.\n\n\nOne common use case we've seen is that users define their job names like `test-$[[ inputs.environment ]]-deployment`.\n\n\nWhen using inputs in job names, you can prevent naming conflicts when the same component is included multiple times in a single pipeline. Without this capability, including the same component twice would result in job name collisions, with the second inclusion overwriting the first. Input-based job names ensure each inclusion creates uniquely named jobs.\n\n\n**Before inputs:**\n\n\n```yaml\n\ntest-service:\n  variables:\n    SERVICE_NAME: auth-service\n    ENVIRONMENT: staging\n  script:\n    - run-tests-for $SERVICE_NAME in $ENVIRONMENT\n```\n\n\n**With inputs:**\n\n\n```yaml\n\nspec:\n  inputs:\n    environment:\n      type: string\n    service_name:\n      type: string\n\ntest-$[[ inputs.service_name ]]-$[[ inputs.environment ]]:\n  script:\n    - run-tests-for $[[ inputs.service_name ]] in $[[ inputs.environment ]]\n```\n\n\nWhen included multiple times with different inputs, this creates jobs like `test-auth-service-staging`, `test-payment-service-production`, and `test-notification-service-development`. 
Each job has a unique, meaningful name that clearly indicates its purpose, making pipeline visualization much clearer than having multiple jobs with identical names that would overwrite each other.\n\n\nNow let's go back to the first example in the top of this blog and use inputs, one immediate benefit is that instead of maintaining multiple templates file we can use one reusable template with different input values:\n\n\n```yaml\n\nspec:\n  inputs:\n    environment:\n      type: string\n    database_url:\n      type: string\n    action:\n      type: string\n---\n\n$[[ inputs.action ]]-$[[ inputs.environment ]]:\n  script:\n    - echo \"Running $[[ inputs.action ]] in $[[ inputs.environment ]] environment\"\n    - echo \"Database URL is $[[ inputs.database_url ]]\"\n    - run-$[[ inputs.action ]] --env=$[[ inputs.environment ]] --db=$[[ inputs.database_url ]]\n```\n\n\nAnd in the main `gitlab-ci.yml` file we can include it twice (or more) with different values, making sure we avoid naming collisions\n\n\n```yaml\n\ninclude:\n  - local: 'templates/environment-template.yml'\n    inputs:\n      environment: test\n      database_url: test-db.example.com\n      action: tests\n  - local: 'templates/environment-template.yml'\n    inputs:\n      environment: production\n      database_url: prod-db.example.com\n      action: deploy\n```\n\n\n**The result:** Instead of maintaining separate YAML files for testing and deployment jobs, you now have a single reusable template that handles both use cases safely. This approach scales to any number of environments or job types — reducing maintenance overhead, eliminating code duplication, and ensuring consistency across your entire pipeline configuration. One template to maintain instead of many, with zero risk of variable collision or configuration drift.\n\n\n### Validation and type safety\n\n\nAnother key difference between variables and inputs lies in validation capabilities. 
Inputs support different value types, including strings, numbers, booleans, and arrays, with validation occurring immediately when the pipeline is created. If you define an input as a boolean but pass a string, GitLab will reject the pipeline before any jobs execute, saving time and resources.\n\n\nHere is an example of the enormous benefit of type validation.\n\n\n**Without type validation (variables):**\n\n\n```yaml\n\nvariables:\n  ENABLE_TESTS: \"true\"  # Always a string\n  MAX_RETRIES: \"3\"      # Always a string\n\ndeploy_job:\n  script:\n    - if [ \"$ENABLE_TESTS\" = true ]; then  # This fails!\n        echo \"Running tests\"\n      fi\n    - retry_count=$((MAX_RETRIES + 1))      # String concatenation: \"31\"\n\n```\n\n\n**Problem:**  The boolean check fails because “`true`” (string) is not equal to `true`, (boolean).\n\n\n**With type validation (inputs):**\n\n\n```yaml\n\nspec:\n  inputs:\n    enable_tests:\n      type: boolean\n      default: true\n    max_retries:\n      type: number\n      default: 3\n\n      \ndeploy_job:\n  script:\n    - if [ \"$[[ inputs.enable_tests ]]\" = true ]; then  # Works correctly\n        echo \"Running tests\"\n      fi\n    - retry_count=$(($[[ inputs.max_retries ]] + 1))    # Math works: 4\n\n```\n\n\n**Real-world impact for variable type validation failure**: A developer or a process triggers a GitLab CI/CD pipeline with `ENABLE_TESTS = yes` instead of `true`. Assuming it takes on average 30 minutes before the deployment job starts, then finally when this job kicks off, 30 minutes or longer into the pipeline run, the deployment script tries to evaluate the boolean and fails.  \n\n\nImagine the impact in terms of time-to-market and, of course. 
developer time trying to debug why a seemingly basic deploy job failed.\n\n\nWith type inputs, GitLab CI/CD will immediately throw an error and provide an explicit error message regarding the type mismatch.\n\n\n### Security and access control\n\n\nInputs provide enhanced security through controlled parameter passing with explicit contracts that define exactly what values are expected and allowed, creating clear boundaries between parameter passing to the pipeline. In addition, inputs are immutable. Once the pipeline starts, they cannot be modified during execution, providing predictable behavior throughout the pipeline lifecycle and eliminating the security risks that come from runtime variable manipulation.\n\n\n### Scope and lifecycle\n\n\nWhen you define variables using the `variables:` keyword at the top level of your `.gitlab-ci.yml` file, these variables become defaults for all jobs in your entire pipeline. When you include templates, you must consider what variables you've defined globally, as they can interact with the template's expected behavior through GitLab's variable precedence order.\n\n\nInputs are defined in CI configuration files (e.g. components or templates) and assigned values when a pipeline is triggered, allowing you to customize reusable CI configurations. They exist solely for pipeline creation and configuration time, scoped to the CI configuration file where they're defined, and become immutable references once the pipeline begins execution. Since each component maintains its own inputs, there is no risk of inputs interfering with other components or templates in your pipeline, eliminating variable collision and override issues that can occur with variable-based approaches.\n\n\n## Working with variables and inputs together\n\n\nWe recognize that teams have extensive investments in their variable-based workflows, and migration to inputs doesn't happen overnight. 
That's why we've developed capabilities that allow inputs and variables to work seamlessly together, providing a bridge between existing variables and the benefits of inputs while overcoming some key challenges in variable expansion.\n\n\nLet's look at this real-world example.\n\n\n**Variable expansion in rules conditions**\n\n\nA common challenge occurs when using variables that contain other variable references in `rules:if` conditions. GitLab only expands variables one level deep during rule evaluation, which can lead to unexpected behavior:\n\n\n```yaml\n# This doesn't work as expected\n\nvariables:\n  TARGET_ENV:\n    value: \"${CI_COMMIT_REF_SLUG}\"\n\ndeploy-job:\n  rules:\n    - if: '$TARGET_ENV == \"production\"'  # Compares \"${CI_COMMIT_REF_SLUG}\" != \"production\"\n      variables:\n        DEPLOY_MODE: \"blue-green\"\n```\n\n\nThe `expand_vars` function solves this by forcing proper variable expansion in inputs:\n\n```yaml\nspec:\n  inputs:\n    target_environment:\n      description: \"Target deployment environment\"\n      default: \"${CI_COMMIT_REF_SLUG}\"\n---\n\n\ndeploy-job:\n  rules:\n    - if: '\"$[[ inputs.target_environment | expand_vars ]]\" == \"production\"'\n      variables:\n        DEPLOY_MODE: \"blue-green\"\n        APPROVAL_REQUIRED: \"true\"\n    - when: always\n      variables:\n        DEPLOY_MODE: \"rolling\"\n        APPROVAL_REQUIRED: \"false\"\n  script:\n    - echo \"Target: $[[ inputs.target_environment | expand_vars ]]\"\n    - echo \"Deploy mode: ${DEPLOY_MODE}\"\n```\n\n\n### Why this matters\n\n\nWithout `expand_vars`, rule conditions evaluate against the literal variable reference (like `\"${CI_COMMIT_REF_SLUG}\"`) rather than the expanded value (like `\"production\"`). 
This leads to rules that never match when you expect them to, breaking conditional pipeline logic.\n\n\n**Important notes about expand_vars:**\n\n\n* Only variables that can be used with the include keyword are supported  \n\n* Variables must be unmasked (not marked as protected/masked)  \n\n* Nested variable expansion is not supported  \n\n* Rule conditions using `expand_vars` must be properly quoted: `'\"$[[ inputs.name | expand_vars ]]\" == \"value\"'`\n\n\nThis pattern solves the single-level variable expansion limitation, working for any conditional logic that requires comparing fully resolved variable values.\n\n\n### Function chaining for advanced processing\n\n\nAlong with `expand_vars`, you can use functions like `truncate` to shorten values for compliance with naming restrictions (such as Kubernetes resource names), creating sophisticated parameter processing pipelines while maintaining input safety and predictability.\n\n\n```yaml\n\nspec:  \n  inputs:\n    service_identifier:\n      default: 'service-$CI_PROJECT_NAME-$CI_COMMIT_REF_SLUG'\n---\n\ncreate-resource:\n  script:\n    - resource_name=$[[ inputs.service_identifier | expand_vars | truncate(0,50) ]]\n```\n\n\nThis integration capability allows you to adopt inputs gradually while leveraging your existing variable infrastructure, making the migration path much smoother.\n\n\n### From components only to CI pipelines\n\n\nUp until GitLab 17.11, GitLab users were able to use inputs only in components and templates through the `include:` syntax. This limited their use to reusable CI/CD configurations, but didn't address the broader need for dynamic pipeline customization.\n\n\n### Pipeline-wide inputs support\n\n\nStarting with GitLab 17.11, GitLab users can now use inputs to safely modify pipeline behavior across all pipeline execution contexts, replacing the traditional reliance on pipeline variables. 
This expanded support includes:\n\n\n* Scheduled pipelines: Define inputs with defaults for automated pipeline runs while allowing manual override when needed.  \n\n* Downstream pipelines: Pass structured inputs to child and multi-project pipelines with proper validation and type safety.  \n\n* Manual pipelines: Present users with a clean, validated form interface.\n\n\nThose enhancements, with more to follow, allow teams to gradually modernize their pipelines while maintaining backward compatibility. Once inputs are fully adopted, users can disable pipeline variables to ensure a more secure and predictable CI/CD environment.\n\n\n## Summary\n\n\nThe transition from variables to inputs represents more than just a technical upgrade — it's a shift toward more maintainable, predictable, and secure CI/CD pipelines. While variables continue to serve important purposes for configuration, inputs provide the parameter-passing capabilities that teams have been working around for years.\n\n\nWe understand that variables are deeply embedded in existing workflows, which is why we've built bridges between the two systems. The `expand_vars` function and other input capabilities allow you to adopt inputs gradually while leveraging your existing variable infrastructure.\n\n\nBy starting with new components and templates, then gradually migrating high-impact workflows, you'll quickly see the benefits of clearer contracts, earlier error detection, and more reliable automation that scales across your organization. 
Additionally, moving to inputs creates an excellent foundation for leveraging [GitLab's CI/CD Catalog](https://gitlab.com/explore/catalog), where reusable components with typed interfaces become powerful building blocks for your DevOps workflows — but more on that in our next blog post.\n\n\nYour future self and your teammates will thank you for the clarity and reliability that inputs bring to your CI/CD workflows, while still being able to work with the variable systems you've already invested in.\n\n\n## What's next \n\n\nLooking ahead, we're expanding inputs to solve two key challenges: enhancing pipeline triggering with cascading options that [dynamically adjust based on user selections](https://gitlab.com/gitlab-org/gitlab/-/issues/520094), and providing job-level inputs that allow users to [retry individual jobs with different parameter values](https://gitlab.com/groups/gitlab-org/-/epics/17833). We encourage you to follow these discussions, share your feedback, and contribute to shaping these features. 
You can also provide general feedback on CI/CD inputs through our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n## Read more\n\n- [How to include file references in your CI/CD components](https://about.gitlab.com/blog/how-to-include-file-references-in-your-ci-cd-components/)\n- [CI/CD inputs documentation](https://docs.gitlab.com/ci/inputs/)\n- [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n- [GitLab environment variables demystified](https://about.gitlab.com/blog/demystifying-ci-cd-variables/)\n",[9,693,697],{"featured":6,"template":700,"slug":1626},"ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline","content:en-us:blog:ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline.yml","Ci Cd Inputs Secure And Preferred Method To Pass Parameters To A Pipeline","en-us/blog/ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline.yml","en-us/blog/ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline",{"_path":1632,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1633,"content":1639,"config":1644,"_id":1646,"_type":14,"title":1647,"_source":16,"_file":1648,"_stem":1649,"_extension":19},"/en-us/blog/ci-cd-market-consolidation",{"title":1634,"description":1635,"ogTitle":1634,"ogDescription":1635,"noIndex":6,"ogImage":1636,"ogUrl":1637,"ogSiteName":685,"ogType":686,"canonicalUrls":1637,"schema":1638},"The CI/CD market consolidation","The DevOps industry is consolidating. 
GitLab is here to stay.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679957/Blog/Hero%20Images/consolidate.jpg","https://about.gitlab.com/blog/ci-cd-market-consolidation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The CI/CD market consolidation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-02-21\",\n      }",{"title":1634,"description":1635,"authors":1640,"heroImage":1636,"date":1641,"body":1642,"category":300,"tags":1643},[1425],"2019-02-21","\n\nSince the beginning of the year, we’ve seen consolidation in the DevOps industry.\nIn January, we saw that [Travis CI was acquired by Idera](https://techcrunch.com/2019/01/23/idera-acquires-travis-ci/)\nand today we saw [Shippable acquired by JFrog](https://techcrunch.com/2019/02/21/jfrog-acquires-shippable-adding-continuous-integration-and-delivery-to-its-devops-platform/).\nContinuous integration is a key part of a developer’s workflow and important for\ngetting your code to market quickly. As enterprises continue to go cloud-native,\n[CI/CD](/topics/ci-cd/) is a key part of [delivering innovative software products](https://about.gitlab.com/blog/application-modernization-best-practices/)\nto market and staying ahead of competition.\n\nMost technology markets go through stages as they mature. 
When a young technology\nis first becoming popular, there tends to be an explosion of tools to support it.\nNew technologies have a lot of rough edges that make them difficult to use and\nearly tools tend to center around making the experience easier to adopt and use.\nOnce a technology matures, tool consolidation is a natural part of the life cycle.\n\n## Partnering on CI\n\nWith consolidation, it’s no secret that people get nervous about the partner they\nare choosing as their CI backbone.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Where should OSS projects move to if currently using \u003Ca href=\"https://twitter.com/travisci?ref_src=twsrc%5Etfw\">@travisci\u003C/a>? \u003Ca href=\"https://t.co/TZJF80T1X9\">https://t.co/TZJF80T1X9\u003C/a>\u003C/p>&mdash; Carl DB (@carllerche) \u003Ca href=\"https://twitter.com/carllerche/status/1098669954759516162?ref_src=twsrc%5Etfw\">February 21, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nHere at GitLab, our users are first and foremost in mind. We’re not going anywhere.\nIn fact, we continue to build out our [leadership team](https://www.businessinsider.com/gitlab-cmo-cro-ipo-2019-2) and have stated publicly,\nmany times over, that we’re on the road to IPO by November 2020. So we invite you\nto [try out GitLab](https://gitlab.com/users/sign_in#register-pane).\n\n## We make it easy - GitLab CI/CD for GitHub\n\nSo no matter where you store your code, we can help. If you host your code on\nGitHub, you can build, test, and deploy all on GitLab. 
You can follow along in\nthe video below or learn more [here](https://about.gitlab.com/solutions/github/) or follow the documentation [here](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html).\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/qgl3F2j-1cI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nAnd if you’d like to try using GitLab end-to-end from planning to product monitoring,\nwe make it easy to move your code over to GitLab and use a single application for\nyour developer workflow.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/VYOXuOg9tQI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## #TravisAlums - GitLab is hiring\nLastly if you’re a Travis Alum, first off thank you for your work on advancing the industry forward.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">Can I ask a favor? Given the recent news I&#39;ve been sharing about Travis CI&#39;s recent layoffs, and your overwhelming support in my DMs, if you used Travis and are looking to hire, can you tweet using the hashtag \u003Ca href=\"https://twitter.com/hashtag/travisAlums?src=hash&amp;ref_src=twsrc%5Etfw\">#travisAlums\u003C/a> ?\u003C/p>&mdash; Carmen Hernández Andoh (@carmatrocity) \u003Ca href=\"https://twitter.com/carmatrocity/status/1098583889864478720?ref_src=twsrc%5Etfw\">February 21, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nWe know that with any acquisition there are always folks who will find a good home\nin their new parent company and folks for whom the new situation will no longer\nbe a good fit. 
We hope for the former, but if you are looking for a [new opportunity](https://about.gitlab.com/jobs/),\nwe believe we have a great team of folks and are working on some of the most\nexciting challenges in this space.",[1062,9],{"slug":1645,"featured":6,"template":700},"ci-cd-market-consolidation","content:en-us:blog:ci-cd-market-consolidation.yml","Ci Cd Market Consolidation","en-us/blog/ci-cd-market-consolidation.yml","en-us/blog/ci-cd-market-consolidation",{"_path":1651,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1652,"content":1658,"config":1663,"_id":1665,"_type":14,"title":1666,"_source":16,"_file":1667,"_stem":1668,"_extension":19},"/en-us/blog/ci-cd-the-ticket-to-multicloud",{"title":1653,"description":1654,"ogTitle":1653,"ogDescription":1654,"noIndex":6,"ogImage":1655,"ogUrl":1656,"ogSiteName":685,"ogType":686,"canonicalUrls":1656,"schema":1657},"CI/CD: The ticket to multicloud","Read our expert panel from MulticloudCon on how CI/CD and cloud-agnostic DevOps help organizations go multicloud and increase productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679235/Blog/Hero%20Images/cloud-native-predictions-2019.jpg","https://about.gitlab.com/blog/ci-cd-the-ticket-to-multicloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD: The ticket to multicloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-17\",\n      }",{"title":1653,"description":1654,"authors":1659,"heroImage":1655,"date":1660,"body":1661,"category":1040,"tags":1662},[715],"2020-01-17","\n\nIn November 2019, we had the opportunity to co-host [MulticloudCon](https://multicloudcon.io/), a zero-day event with our partners at [Upbound](https://upbound.io/). 
The event featured experts in cloud, Kubernetes, database resources, CI/CD, security, and more, to learn how [multicloud is evolving](/topics/multicloud/) and empowering developers and operations experts across the industry.\n\nDevOps can play a major role in cloud usage. In this discussion from MulticloudCon, we assembled a panel of experts across the industry to talk about [CI/CD](/solutions/continuous-integration/) and DevOps in multiple clouds. As [multicloud](/topics/multicloud/) technology continues to evolve, tools can give organizations more control and flexibility on where their workloads live and where they deploy.\n\n![CI/CD MulticloudCon panelists](https://about.gitlab.com/images/blogimages/multicloudcon-panel.png){: .shadow.medium.center}\n\n## Panel highlights\n\n### Why multicloud is important:\n\n> “If we have a single point of failure on a cloud, it is really easy to have some downtime [or] an outage and be like, \"Well, it was my cloud provider's fault.\" But, to our customers, that doesn't matter. You as a company, we're down and that affects them.”\n– Ana Medina, Chaos Engineer at [Gremlin](https://www.gremlin.com/)\n\n> “There are a lot more applications now that are becoming event-driven and are relying on integrations with cloud providers. And if it's more than one, you can't just test on one provider and go well it works across the board. You need to be expanding your test coverage to cover multiple cloud providers.”\n– Denver Williams, DevOps/SRE Consultant at [Vulk Coop](http://vulk.coop/)\n\n\n### The challenges of multicloud:\n\n> “When you're running in multiple clouds, that also introduces problems… I'm talking more specifically about high availability and also fault tolerance and then disaster recovery. These are things people just think about, ‘Oh we need to connect, integrate.’ But at the end of the day, if you're serious about running these applications, you need to also think about those things. 
And introducing those complexities from the different cloud providers will definitely impact your operations.”\n– Angel Rivera, Developer Advocate at [CircleCI](https://circleci.com/)\n\n\n### How tools impact a multicloud strategy:\n\n> “One thing that helps a lot when you're working on deploys for multicloud is to choose tooling that is going to support multiple clouds off the bat… One thing you really want to avoid, if possible, is ending up with different workflows for different cloud providers. Because then you're testing with different CI/CD pipelines. It's different code and it's inevitably going to behave differently. And then you're going to run into weird bugs.” – Denver Williams\n\n> “When I'm talking to users and GitLab customers that are doing multicloud, they're doing a lot of orchestration and abstraction, and they're having to write an abstraction layer in order to homogenize a logic. A lot of folks have talked about Crossplane today. When I see these types of capabilities and Crossplane in that community emerging, that's pretty exciting because that's what I see a lot of folks writing all the time. That can just be pulled out into a tool and offloaded so that you can focus on the business logic.” – [William Chia](/company/team/#williamchia), Sr. Product Marketing Manager at GitLab\n\nLearn more about GitLab’s Crossplane integration in our [12.5 release](/releases/2019/11/22/gitlab-12-5-released/#crossplane-support-in-gitlab-managed-apps).\n\n\n### CI/CD and multicloud best practices:\n\n> “There's always going to be platform-specific code. Just keep that separate and then your actual YAML logic, keep it agnostic.” – Uma Mukkara, Co-founder and COO at [MayaData](https://mayadata.io/)\n\n> “At Gremlin we help companies avoid downtime. So, we're starting to work with integrations with CI/CD platforms so folks actually start having a stage that they run chaos engineering experiments... 
You can actually build a lot more testing around past outages that your company has had or maybe some of the large outages that we've seen around in the industry. Building testing around those scenarios, [we’re] making sure the caching layers are able to handle when one of your services goes down... If you're caching layer limits out, the other services that are dependent on it are able to still continue providing a good user experience.” – Ana Medina\n\n> “I always encourage people who are writing pipelines in our platform to do some checks against APIs that they use so that they can just fail their builds right away, instead of wasting money and effort and going to build that. It's going to eventually fail.” – Angel Rivera\n\nMulticloud is made possible through cloud native applications built from containers using services from different cloud providers, and allows for multiple services to be managed in one architecture. CI/CD plays a big role in workflow portability, ensuring workflows stay consistent (no matter where projects are deployed).\n\nWatch the full panel discussion below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Sx02_fyaGgc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Marc Wieland](https://unsplash.com/photos/zrj-TPjcRLA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/clouds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n",[9,830,1228],{"slug":1664,"featured":6,"template":700},"ci-cd-the-ticket-to-multicloud","content:en-us:blog:ci-cd-the-ticket-to-multicloud.yml","Ci Cd The Ticket To 
Multicloud","en-us/blog/ci-cd-the-ticket-to-multicloud.yml","en-us/blog/ci-cd-the-ticket-to-multicloud",{"_path":1670,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1671,"content":1676,"config":1681,"_id":1683,"_type":14,"title":1684,"_source":16,"_file":1685,"_stem":1686,"_extension":19},"/en-us/blog/ci-minutes-for-free-users",{"title":1672,"description":1673,"ogTitle":1672,"ogDescription":1673,"noIndex":6,"ogImage":1200,"ogUrl":1674,"ogSiteName":685,"ogType":686,"canonicalUrls":1674,"schema":1675},"Changes to CI pipeline minutes for new free users","This change better aligns to GitLab's buyer-based open-core model.","https://about.gitlab.com/blog/ci-minutes-for-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Changes to CI pipeline minutes for new free users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-03-18\",\n      }",{"title":1672,"description":1673,"authors":1677,"heroImage":1200,"date":1678,"body":1679,"category":1062,"tags":1680},[1425],"2020-03-18","___Update October 8, 2024: This blog is superseded by the blog post announcing [upcoming changes to CI/CD minutes for free tier users on GitLab.com](https://about.gitlab.com/blog/ci-minutes-update-free-users/). Please refer to our [pricing page](https://about.gitlab.com/pricing/) for the full breakdown of usage limits per tier.___\n\nEffective Sunday March 15, 2020, UTC, we are making changes to the CI pipeline minutes offered to *new* free users.\nMoving forward, all free accounts will have 2000 pipeline minutes per group per month independent of the visibility of the project.\nExisting free users will not have their plans changed.\n\n## What are pipeline minutes?\n\nAs we share on our [pricing page](https://about.gitlab.com/pricing/),\n\n> Pipeline minutes are the execution time for your pipelines on our shared runners. 
\n> Execution on your own runners will not increase your pipeline minutes count and is unlimited.\n\nPipeline minutes are a crucial part of what makes GitLab special.\n\n## Where did this come from?\n\nOne of our core values at GitLab is [efficiency](https://handbook.gitlab.com/handbook/values/#efficiency).\nAs GitLab grows and matures as both a company and a product, we've [really focused on becoming a more efficient company](https://youtu.be/wrnWaYS7Fgo?t=275). \nThis includes making sure we're being efficient in our CI offerings. \n\nAs we work on some new improvements to CI, including Windows and MacOS runners, we evaluated usage by free users.\nFrom an internal analysis, we found that 95% of free users who used CI minutes in January 2020 used fewer than 1000 CI minutes.\nBut we're not talking about 1000 minutes, we're talking about twice that. \nAnd 2000 minutes is a lot of minutes.\nThat's over an hour of CI minutes every day. \n\nWe are constantly working to provide the most value to our community through GitLab.\nThe best way we can do that is by strengthening our open source offering, including [make any features open source that are eligible to be open source](/company/pricing/#if-a-feature-can-be-moved-down-do-it-quickly).\n\n## What if that's not enough minutes?!\n\nIf 2000 minutes isn't enough, free users can buy [additional CI minutes](https://docs.gitlab.com/ee/subscriptions/#purchasing-additional-ci-minutes).\n\nAlternatively, you can bring your own runners. \nYou can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). 
\nWe only count minutes on the shared runners we provide on GitLab.com.\n\n## What's next?\n\nExcited about all the cool things that can be done with CI?\nMe too!\n\nHere are some other things coming down the pipeline in the next couple of releases:\n\n* [Dynamic child pipeline creation via artifact includes](https://gitlab.com/gitlab-org/gitlab/-/issues/35632)\n* [Autoscaling GitLab CI jobs on AWS Fargate (MVC)](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2972)\n* [Notifications for when pipelines are fixed](https://gitlab.com/gitlab-org/gitlab/-/issues/24309)\n",[9,721,268,827],{"slug":1682,"featured":6,"template":700},"ci-minutes-for-free-users","content:en-us:blog:ci-minutes-for-free-users.yml","Ci Minutes For Free Users","en-us/blog/ci-minutes-for-free-users.yml","en-us/blog/ci-minutes-for-free-users",{"_path":1688,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1689,"content":1694,"config":1699,"_id":1701,"_type":14,"title":1702,"_source":16,"_file":1703,"_stem":1704,"_extension":19},"/en-us/blog/ci-minutes-update-free-users",{"title":1690,"description":1691,"ogTitle":1690,"ogDescription":1691,"noIndex":6,"ogImage":1200,"ogUrl":1692,"ogSiteName":685,"ogType":686,"canonicalUrls":1692,"schema":1693},"Upcoming changes to CI/CD minutes for free tier users on GitLab.com","The reduction of CI/CD minutes aligns with the majority of free user usage","https://about.gitlab.com/blog/ci-minutes-update-free-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Upcoming changes to CI/CD minutes for free tier users on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":1690,"description":1691,"authors":1695,"heroImage":1200,"date":1696,"body":1697,"category":1062,"tags":1698},[1425],"2020-09-01","\nAt GitLab, we’ve been actively working towards empowering our community to make 
DevOps a reality for teams of all sizes. We’ve constantly [moved features down](/blog/new-features-to-core/) to our free product to enable more users to benefit from it. The [lower tiers offer more relative value](/company/pricing/#lower-tiers-have-more-relative-value) and help to get more users access to a complete DevOps platform.\n\nAs a result, the usage of GitLab has grown significantly over time to an estimated [30 million registered users](/why-gitlab/) - of which almost 6 million GitLab.com users are on our GitLab.com free tier. While we are excited by this exponential growth, our underlying costs to support this growth have increased significantly. As GitLab matures as both a company and a product, we must focus on becoming a more efficient company.\n\nWe evaluted CI/CD minute usage and found that 98.5% of free users use 400 CI/CD minutes or less per month. By lowering the current monthly usage limit, we are not only aligning the CI/CD minute limits with usage and related tier prices, but ensuring we can continue to maintain our commitment to offer a free GitLab.com tier.\n\n## Changes to the GitLab.com Free tier\n\nEffective October 1, 2020, we are reducing CI/CD minutes to 400 minutes per top-level group (or personal namespace) per month on the Free tier of GitLab.com.\n\n|    | Free | Bronze | Silver | Gold |\n| -- | ---- | ------ | ------ | ---- |\n| Price | $0 | $4 | $19 | $99 |\n| CI/CD Minutes | 400 | 2,000 | 10,000 | 50,000 |\n\n## Check and reduce CI/CD minutes used\n\nCI/CD minute usage can be reduced in a number of ways, including [bringing your own runners](https://docs.gitlab.com/runner/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/GrO-8KtIpRA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFor more details on the changes and how to manage and reduce your CI/CD minutes usage, please visit the [customer 
FAQ](/pricing/faq-compute-minutes/).\n\n## Options to increase CI/CD minutes available\n\nIf 400 minutes is not enough, you can purchase additional CI/CD minutes at $10 per 1000 minutes or upgrade to [a paid tier](/pricing/). Also, you can bring your own runners. You can [run specific runners for any of your projects](https://docs.gitlab.com/runner/). We only count minutes on the shared runners we provide on GitLab.com.\n\nGitLab also offers Gold tier capabilities and 50,000 minutes per group per month CI/CD minutes for our [Open Source](/solutions/open-source/join/), [Education](/solutions/education/), and [Startups](/solutions/startups/) programs. If you are eligible for these programs, consider applying through their relevant program pages.\n\n## CI/CD minute limits will remain unchanged for Open Source, Education and Startups programs\n\nCI/CD minute limits will **remain unchanged** for members of our GitLab for [Open Source](/solutions/open-source/join/), [GitLab for Education](/solutions/education/), and [GitLab for Startups](/solutions/startups/) programs and will continue to match our [Gold tier](/pricing/). 
For more information on these programs and how to apply, please visit the relevant program pages.\n\n## More information\n\nPlease refer to the [customer FAQ](/pricing/faq-compute-minutes/) for more information.\n\nTo address your questions and feedback, we have created a space in the [GitLab Community Forum](https://forum.gitlab.com/t/ci-cd-minutes-for-free-tier/40241), which is actively monitored by GitLab Team members and Product Managers involved with this change.\n",[9,721,268,827],{"slug":1700,"featured":6,"template":700},"ci-minutes-update-free-users","content:en-us:blog:ci-minutes-update-free-users.yml","Ci Minutes Update Free Users","en-us/blog/ci-minutes-update-free-users.yml","en-us/blog/ci-minutes-update-free-users",{"_path":1706,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1707,"content":1713,"config":1719,"_id":1721,"_type":14,"title":1722,"_source":16,"_file":1723,"_stem":1724,"_extension":19},"/en-us/blog/cloud-native-architectures-made-easy",{"title":1708,"description":1709,"ogTitle":1708,"ogDescription":1709,"noIndex":6,"ogImage":1710,"ogUrl":1711,"ogSiteName":685,"ogType":686,"canonicalUrls":1711,"schema":1712},"Simplifying and optimizing cloud native architectures","Learn what cloud native architectures are, how to optimize them using GitLab's cohesive approach and what features you can use to help be more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671263/Blog/Hero%20Images/cloudarchitecture.jpg","https://about.gitlab.com/blog/cloud-native-architectures-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simplifying and optimizing cloud native architectures\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-11-13\",\n      }",{"title":1708,"description":1709,"authors":1714,"heroImage":1710,"date":1716,"body":1717,"category":300,"tags":1718},[1715],"Suri 
Patel","2019-11-13","\nMany teams embark on a journey to strengthen operations and development. Whether it’s battling monolithic applications by adopting containers and microservices or attempting to elevate a mature architecture by switching CI/CD tools, it is important to have a solution with robust cloud native support. When containers and cloud native workflows are easy to set and maintain, teams increase operational efficiency and can focus on delivering better products faster.\n\n## What goes into a cloud native architecture?\n\n[Cloud native applications](/topics/cloud-native/) are built using [microservices](/topics/microservices/) rather than a monolithic application structure. You can think of microservices as smaller pieces that unite to perform a specific action. Microservices can be scaled based on load, creating a more resilient environment. Container orchestration tools, like [Kubernetes](/solutions/kubernetes/), enable developers to manage the way an application’s containers function, including scaling and deployment.\n\nEmbracing cloud native architectures results in an increase in developer time, a decrease in the amount of money spent on monitoring and scaling application resources (through cloud orchestration and container schedulers), and faster shipping.\n\n## GitLab is designed for cloud native architectures\n\nGitLab’s [Kubernetes](/solutions/kubernetes/) integration, [built-in container registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html), and advanced [CI/CD features](/solutions/continuous-integration/) support microservices, such as multi-project pipelines, and monorepo projects. 
Furthermore, teams can keep the same workflow regardless of which cloud apps they are deploying to, so there’s no need to rework your entire process.\n\n## Why choose GitLab for your cloud native needs\n\nGitLab has a prominent place in the cloud native ecosystem and according to Forrester: [“GitLab’s simple and cohesive approach lands it squarely as a leader. GitLab's approach of having a single application to manage each phase of software development comes through in its developer experience”](/analysts/forrester-cloudci19/).\n\nGitLab doesn’t require manual and painstaking scripts. Our tool has native capabilities for Kubernetes integration and an out-of-the-box solution for advanced deployment flows for progressive delivery, like incremental rollout and canary deploys. GitLab also comes with [feature flagging as a built-in capability](/blog/feature-flags-continuous-delivery/), eliminating the need for a third-party solution.\n\nGitLab’s [multicloud](/topics/multicloud/) strategy with workflow portability increases operational efficiencies and makes it the easiest way to build cloud native applications.\n\nCover image by [Julian Santa Ana](https://unsplash.com/@jul_xander) on [Unsplash](https://unsplash.com/photos/FKqH1QhUqaw)\n{: .note}\n",[9,830],{"slug":1720,"featured":6,"template":700},"cloud-native-architectures-made-easy","content:en-us:blog:cloud-native-architectures-made-easy.yml","Cloud Native Architectures Made Easy","en-us/blog/cloud-native-architectures-made-easy.yml","en-us/blog/cloud-native-architectures-made-easy",{"_path":1726,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1727,"content":1733,"config":1740,"_id":1742,"_type":14,"title":1743,"_source":16,"_file":1744,"_stem":1745,"_extension":19},"/en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"title":1728,"description":1729,"ogTitle":1728,"ogDescription":1729,"noIndex":6,"ogImage":1730,"ogUrl":1731,"ogSiteName":685,"ogType":686,"canonicalUrls":1731,"schema":1732},"How to prevent 
deployments from overrunning your budget","Guest authors from VMware share how to include budget and resource checking into your continuous deployment with Cloudhealth and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670389/Blog/Hero%20Images/gitlab-cloud-journey.png","https://about.gitlab.com/blog/cloudhealth-and-gitlab-reducing-overruns","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to prevent deployments from overrunning your budget\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Davis\"},{\"@type\":\"Person\",\"name\":\"Bahubali (Bill) Shetti\"}],\n        \"datePublished\": \"2019-08-26\",\n      }",{"title":1728,"description":1729,"authors":1734,"heroImage":1730,"date":1737,"body":1738,"category":1040,"tags":1739},[1735,1736],"Tim Davis","Bahubali (Bill) Shetti","2019-08-26","\n\nManaging deployments is a complex task and DevOps admins generally consider it a victory when a deployment is\nachieved and somewhat repeatable. Unfortunately this process doesn't give DevOps admins time to\nconsider the impact of the outcome on the larger operations pipeline. We know the importance of\n[Continuous Verification](https://thenewstack.io/continuous-verification-the-missing-link-to-fully-automate-your-pipeline/)\n– it's just one of several day-two operations and best practices that need to be brought into the\ncontinuous deployment (CD) process to achieve efficiencies. But we also need to look at the budget.\n\n## Adding budget and resource checking into your CD\n\nMost developers and DevOps admins don't consider the impact of their deployment on the budget. 
They\nalso don't generally check if sufficient resources in AWS exist prior to deployment because, after\nall, aren't there \"unlimited\" resources on AWS?\n\nAdding the proper budget and resource checks into the pipeline helps avoid:\n\n* Potential rollbacks and clean-up actions\n* Redeployment (\"lift and shift\") into other regions in AWS\n* Long analysis to pinpoint budget overruns\n\nNot having to deal with these tasks improves the DevOps admin's metrics, such as mean time to change (MTTC),\ndeployment time, etc., and subsequently efficiency goes up.\n\n## Understanding the policy\n\nPrior to implementing any of these checks, it’s important to understand the \"policy.\" While every\norganization is different, and the iterations of \"policy\" are endless, there are some basic checks\nthat should always be implemented:\n\n* Ensure the project-specific budget is not already overrun\n* Will this deployment exceed the project budget?\n* Is the project already over project-specific limits and restrictions? (i.e. 
cannot use RDS, or\ncan't have more than 10 EC2 instances in a deployment)\n* Will this deployment exceed the project-specific resource policy?\n\nWith these basic checks in place, at least some initial sanity is achieved during a pipeline execution.\nMore and more complex iterations can be added as more is learned about the project and processes are improved.\n\n## How do you do it?\n\nRegardless of the policy complexity, implementing these checks can be easily accomplished with\nstandard off-the-shelf tools like [CloudHealth by VMware](https://cloudhealthtech.com) and [GitLab](/).\n\n* CloudHealth by VMware allows you to define \"perspectives\" specific to your project, create governance\nrules, and access this information through an API for easy integration into any CI/CD tool.\n* GitLab allows you to easily add in scripts and/or pre-built code (containers) enabling\nany possible check against any potential external system.\n\nIn order to highlight how to implement this type of check into the CI/CD pipeline, we've\ndelivered an [example configuration](https://cloudjourney.io/articles/multicloudops/budget_check_cicd-td/)\nusing both CloudHealth and GitLab. We hope this provides a nice baseline to build from.\n\n![CD WITH A CH check from GitLab CI/CD pipelines](https://about.gitlab.com/images/blogimages/glcdpipeline.png){: .shadow.medium.center}\n\n## In summary\n\nAlthough we've provided a baseline that we hope can be used for more complex policy checks in CD,\nconvincing DevOps admins to implement this is another problem. 
Improving metrics should provide\nan incentive for DevOps admins but it is not sufficient for them to simply add budget and resource checks.\nWhile every enterprise has its own process and metrics, we recommend adding a budgetary efficiency\nmetric for DevOps admins.\n\nUsing the configuration above, it’s easy to add in CloudHealth to continuously check the project's\nbudget and utilization, and adding a DevOps budget metric will not only ensure that these checks\nare deployed but will also lead to more efficient deployments.\n\nIf you have any questions regarding this or any other issue, feel free to reach out\nto us [@cloudjourneyio](https://twitter.com/cloudjourneyio) on Twitter!\n\n### About the guest authors\n\n_Bahubali (Bill) Shetti is the director of public cloud solutions for VMware Cloud Services at VMware.\nHe leads a team of cloud architects that evangelize and develop solutions for improving public cloud\noperations (AWS/Azure/GCP). Bahubali was part of the initial team that developed and launched\nVMware Cloud Services. Previous to VMware, he was director of product management at VCE\n(now Dell) for Cloud Management Products. Between 2011-2014, Bahubali lead operations at Cumulus\nNetworks, lead AWS cloud operations at several startups, and headed an open source routing\nsoftware project. Between 2008-2010, Bahubali lead the cloud investment practice at Storm Ventures.\nHe spent 9 years at Cisco in product management and business development. He holds an M.S. in\nInformation Networking from Carnegie Mellon and a B.S. in Electrical Engineering from Rutgers._\n\n_Tim Davis is a cloud advocate at VMware where he focuses on public cloud operations and cloud native\napplications. He provides consulting guidance to a wide range of customers on these topics and\nprovides a bridge between customers and product teams at VMware. He also works to evangelize\nnative cloud usage including AWS, Azure and GCP. 
Prior to his current role, he was a specialist systems\nengineer focused on VMware’s Networking and Security product line. Before VMware, Tim worked as a\nconsultant and VMware architect at Dell Services, which wasone of the largest contracts held at\nthe time. His background is in operations/management and architecture. He holds numerous\nindustry certifications including from VMware and Amazon Web Services._\n",[9,830,721,232],{"slug":1741,"featured":6,"template":700},"cloudhealth-and-gitlab-reducing-overruns","content:en-us:blog:cloudhealth-and-gitlab-reducing-overruns.yml","Cloudhealth And Gitlab Reducing Overruns","en-us/blog/cloudhealth-and-gitlab-reducing-overruns.yml","en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"_path":1747,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1748,"content":1754,"config":1759,"_id":1761,"_type":14,"title":1762,"_source":16,"_file":1763,"_stem":1764,"_extension":19},"/en-us/blog/coming-soon-gitlab-dependency-firewall",{"title":1749,"description":1750,"ogTitle":1749,"ogDescription":1750,"noIndex":6,"ogImage":1751,"ogUrl":1752,"ogSiteName":685,"ogType":686,"canonicalUrls":1752,"schema":1753},"Coming soon: GitLab dependency firewall","Learn how this new feature will help organizations avoid supply chain software attacks by warning them or blocking the download based on a project's policy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665667/Blog/Hero%20Images/built-in-security.jpg","https://about.gitlab.com/blog/coming-soon-gitlab-dependency-firewall","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Coming soon: GitLab dependency firewall\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-03-26\",\n      }",{"title":1749,"description":1750,"authors":1755,"heroImage":1751,"date":1756,"body":1757,"category":697,"tags":1758},[1122],"2024-03-26","The [Maven 
dependency\nproxy](https://about.gitlab.com/blog/gitlabs-maven-dependency-proxy-is-available-in-beta/)\nwas released in GitLab 16.8. This new feature allows organizations to proxy\nand cache packages from one upstream repository to a GitLab project, which\ncan help reduce reliance on external sources.\n\n\nHowever, with this added efficiency there is an added security risk of\nsoftware supply chain attacks like\n[typosquatting](https://www.mcafee.com/learn/what-is-typosquatting) and\nother dependency confusion attacks. Supply chain attacks are when attackers\ntry to get developers and CI/CD pipelines to include malicious packages to\nincrease the surface area of the attack.\n\n\nThe [dependency\nfirewall](https://gitlab.com/groups/gitlab-org/-/epics/5133), planned for\nthe second half of 2024, will help organizations avoid these attacks by\nwarning them or blocking the download based on their project's policy.\n\n\n## What is the dependency firewall?\n\n\nThe dependency firewall is the first line of defense when downloading\npackages from the internet.\n\n\nAt a high level, GitLab wants to build the following capabilities into the\ndependency firewall:\n\n\n* prevent malicious packages from entering the software supply chain\n\n* check each new package against GitLab\n[policy](https://docs.gitlab.com/ee/user/application_security/policies/)\n\n* quarantine packages for review before they are available\n\n* manage quarantined packages\n\n* report package usage\n\n\n### What does a dependency firewall policy do?\n\n\nThe planned dependency firewall policy will do two things: `warn` and\n`fail`. You will be able to create a **dependency firewall policy** that\nwarns your organization when certain conditions are met or quarantines the\npackage. For example, you can create a policy that prevents the package from\nbeing downloaded if it has any known critical vulnerabilities. Or you can\nsimply add a warning for packages with known, but less severe,\nvulnerabilities. 
\n\n\n**Note:** The warnings can be limited to the log files for the minimal\nviable change (MVC).\n\n\nThe first rule we'll support will be as follows:\n\n```\n\n1. When `Security scan`\n\n2. Select \"Scanners\" (dependency scanning)\n\n3. With `No exceptions` that finds `Any` vulnerabilities matching\n\n4. `Critical` severity\n\n```\n\n\nFor the MVC, we will focus on adding a warning when a package downloaded\nthrough the dependency proxy has any known critical vulnerabilities. \n\n\nBeyond the MVC, we will add support for the following:\n\n- lower severity vulnerabilities\n\n- warnings in the package registry UI list view\n\n- rules to quarantine packages\n\n- the ability to review and update the quarantine\n\n- the ability to add a warning to the security vulnerability report\n\n\n## More about rules\n\n\n1. Rules that are `warn` only can leverage a background job. Rules that\n`fail` need to be handled by the web request.\n\n1. Rules handled by a background job can have an extended scope. For\nexample, we can inspect the package information and open the archive to get\nthe metadata, inspect it, and provide more robust rules and conditions.\n\n1. Rules handled within the web request must be fast and scalable. This will\nlimit what we can do in these cases.\n\n\n## Next steps\n\n\nTo learn more or contribute to the dependency firewall, please [visit our\ndependency firewall\nepic](https://gitlab.com/groups/gitlab-org/-/epics/5133).\n\n\n_Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. 
The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab._\n",[697,1062,695,9],{"slug":1760,"featured":6,"template":700},"coming-soon-gitlab-dependency-firewall","content:en-us:blog:coming-soon-gitlab-dependency-firewall.yml","Coming Soon Gitlab Dependency Firewall","en-us/blog/coming-soon-gitlab-dependency-firewall.yml","en-us/blog/coming-soon-gitlab-dependency-firewall",{"_path":1766,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1767,"content":1773,"config":1779,"_id":1781,"_type":14,"title":1782,"_source":16,"_file":1783,"_stem":1784,"_extension":19},"/en-us/blog/compliance-management",{"title":1768,"description":1769,"ogTitle":1768,"ogDescription":1769,"noIndex":6,"ogImage":1770,"ogUrl":1771,"ogSiteName":685,"ogType":686,"canonicalUrls":1771,"schema":1772},"Managing Compliance with GitLab","GitLab makes compliance easy!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681610/Blog/Hero%20Images/andrew-neel-cckf4TsHAuw-unsplash.jpg","https://about.gitlab.com/blog/compliance-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing Compliance with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-10-01\",\n      }",{"title":1768,"description":1769,"authors":1774,"heroImage":1770,"date":1776,"body":1777,"category":978,"tags":1778},[1775],"Fernando Diaz","2020-10-01","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nCompliance is a concept that has historically been complex and unfriendly. The goal of Compliance Management is to change \nthe current paradigm for compliance to create an experience that's simple and friendly. 
GitLab\nprovides advanced auditing features as well as merge request approvals based off of different compliance tags.\n\nWatch this short video (2 minutes) to learn how to configure GitLab compliance features.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/QV2dIocn-hk\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Andrew Neel](https://unsplash.com/@andrewtneel) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9,697],{"slug":1780,"featured":6,"template":700},"compliance-management","content:en-us:blog:compliance-management.yml","Compliance Management","en-us/blog/compliance-management.yml","en-us/blog/compliance-management",{"_path":1786,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1787,"content":1793,"config":1799,"_id":1801,"_type":14,"title":1802,"_source":16,"_file":1803,"_stem":1804,"_extension":19},"/en-us/blog/continuous-integration-ticketmaster",{"title":1788,"description":1789,"ogTitle":1788,"ogDescription":1789,"noIndex":6,"ogImage":1790,"ogUrl":1791,"ogSiteName":685,"ogType":686,"canonicalUrls":1791,"schema":1792},"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases","Ticketmaster Android developer Jeff Kelsey shares why GitLab CI was a game changer for his team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682946/Blog/Hero%20Images/tm-cover-image-small.jpg","https://about.gitlab.com/blog/continuous-integration-ticketmaster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab CI supported Ticketmaster's ramp up to weekly mobile releases\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Kelsey\"}],\n        \"datePublished\": \"2017-06-07\",\n      
}",{"title":1788,"description":1789,"authors":1794,"heroImage":1790,"date":1796,"body":1797,"category":718,"tags":1798},[1795],"Jeff Kelsey","2017-06-07","\nIt's always been a goal for the Ticketmaster mobile team to get to weekly releases. In the first half of this year we were able to accomplish it, delivering new versions\nof both the Android and iOS app on a weekly basis since February. We've seen the positive impact on our fans, and it was even easier than we thought –\nmaking our entire application development process that much better.\n\nBut it didn't start out this way...\n\n\u003C!-- more -->\n\n![review-2](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review2.png \"Most user-friendly ticketing app\")*\u003Csmall>A faster, more consistent release cycle leads to a better fan experience for users of the Ticketmaster Apps.\u003C/small>\n\nThere comes a time in every engineer’s career when a part of your tech stack no longer passes the “smell test.\" Usually, there is some sort of dramatic event where something that was generally accepted as “isn’t the best, but it works” changes to “this is now a problem.” For me and the Ticketmaster mobile team, this event happened with our Jenkins-based CI pipeline in February.\n\nWe were about to release the newest version of our Android app, but there was a mistake in the build. We had forgotten to increment the Android versionCode, meaning we would need to update and create a new binary file to upload to the store. It was the end of the day, a sunny afternoon quickly fading to darkness in Hollywood. By now it was 6pm PST, and everyone was eager to leave.\n\n\"No problem,\" I thought. I can build the release locally in under three minutes, provide the file to the QA team, and we can all get on our way.\n\n“Won’t help us,” responded my high-standard and exceptional QA team.\n\n“All releases need to come from CI for consistency.” They were right. Local builds would not be safe for production. 
What if something about my machine’s configuration introduced an issue?\n\n“Ok, so how long does it take for the release build to get created through our Jenkins CI pipeline?” I asked, figuring the time couldn’t be worse than 30 minutes.\n\n“It takes two hours,” came the response. Sigh… Going to be a late night.\n\n![sysiphus](https://about.gitlab.com/images/blogimages/ticketmaster-assets/sysiphus.gif \"Sysiphus\")\n\n*\u003Csmall>Our old CI pipeline\u003C/small>*\n\n## GitLab CI to save the day (in a day!)\n\nTwo… hours…  For a minor change. Now I can’t lay all the blame on Jenkins. Some of this may have been our own fault, generating too many build flavors, forcing clean rebuilds in between steps and running extra tests for deprecated features. But, it was clear we needed to change and get better at CI. Jenkins was always a bit clunky for the last few years. Weighed down by plugins and years of legacy development, it was also difficult for us to update the Jenkins machines with new SDKs, and we had to rely on other teams to assist us. We clearly needed a fresh start.\n\nWe had been using GitLab at Ticketmaster for several years for code review and visually browsing our git history, so it made sense that trying to utilize [GitLab’s new CI tools](/solutions/continuous-integration/) would be worth a shot. I started with a helpful Android [blog post for setting up GitLab CI from Greyson Parrelli](http://www.greysonparrelli.com/post/setting-up-android-builds-in-gitlab-ci-using-shared-runners/).\n\nBut I soon ran into a problem. At Ticketmaster we use Amazon ECR for our [Docker](https://aws.amazon.com/docker/) container registry rather than GitLab repos, like in the tutorial. With the help of Kraig Amador, Tim Nichols, and others at Ticketmaster, I learned how to push my Docker container image to Amazon ECR and pull it down for each Android build in GitLab CI. 
The final results were a marked improvement:\n\n![gitlab-ci](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Gitlab8min.png 'GitLab CI in 8 minutes')*\u003Csmall>Our GitLab CI build and test takes under 8 minutes to build, test, and publish artifacts.\u003C/small>*\n\nLess than eight minutes total from commit to build, test and generate artifacts. We can use Gradle and the SonarQube plugin to help us calculate code quality with every commit to our codebase, giving us more valuable information in addition to passing failing tests to evaluate all of our merge requests. This gives our team numbers to measure and make goals against.\n\nAnd we could see everything in one place, in GitLab. The iOS team had a more complicated pipeline, but they quickly followed with their own, running their tests on local runners. Since February we have had weekly releases of our mobile apps, and GitLab CI has been a huge part of our success over the past few releases.\n\n## From GitLab artifact to weekly releases\n\n![weekly-release](https://about.gitlab.com/images/blogimages/ticketmaster-assets/WeeklyReleases.png \"Weekly Releases\")*\u003Csmall>GitLab CI has helped us get to weekly releases with more consistent adoption of new releases.\u003C/small>*\n\nWith the benefit of faster cycle time, and faster releases, we have seen other benefits. Since each release has a smaller change set, our crash-free rates and store ratings have improved. We have less time waiting for build and spend more time improving the quality of our products. Our fans are getting features into their hands more quickly and benefit from a higher-quality and a consistently improving product. 
The CI analytics available on GitLab are an additional scoreboard for our team to optimize and improve into the future.\n\nNow, whenever we integrate new SDKs into our mobile apps, we are helping other teams get their SDK’s set up in GitLab CI to push integrated builds to our suite of integration and functional tests as a part of our process. We are [getting to innovation faster](https://tech.ticketmaster.com/2016/11/08/getting-to-innovation-faster/).\n\nThings were looking pretty scrappy for our CI pipeline only a few months ago. Now it is a whole different ballgame. If your team is looking for a way to breathe fresh life into a legacy CI pipeline, I suggest taking a look at GitLab CI. It has been a real game changer for our mobile team at Ticketmaster.\n\n![review-1](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review1.png \"Ticketmaster Mobile Review 1\")\n![review-3](https://about.gitlab.com/images/blogimages/ticketmaster-assets/Review3.png \"Ticketmaster Mobile Review 2\")\n\n### About the Author\n\nJeff Kelsey is the Lead Engineer for Ticketmaster's Android development team. 
Find him on twitter [@jeffkelsey](https://twitter.com/jeffkelsey).\n",[720,9,763],{"slug":1800,"featured":6,"template":700},"continuous-integration-ticketmaster","content:en-us:blog:continuous-integration-ticketmaster.yml","Continuous Integration Ticketmaster","en-us/blog/continuous-integration-ticketmaster.yml","en-us/blog/continuous-integration-ticketmaster",{"_path":1806,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1807,"content":1813,"config":1819,"_id":1821,"_type":14,"title":1822,"_source":16,"_file":1823,"_stem":1824,"_extension":19},"/en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"title":1808,"description":1809,"ogTitle":1808,"ogDescription":1809,"noIndex":6,"ogImage":1810,"ogUrl":1811,"ogSiteName":685,"ogType":686,"canonicalUrls":1811,"schema":1812},"How machine learning ops works with GitLab and continuous machine learning","We share different machine learning use cases for CML projects using GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681802/Blog/Hero%20Images/gitlab_cml_dvc_banner.png","https://about.gitlab.com/blog/continuous-machine-learning-development-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How machine learning ops works with GitLab and continuous machine learning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dr. Elle O'Brien\"}],\n        \"datePublished\": \"2020-12-01\",\n      }",{"title":1808,"description":1809,"authors":1814,"heroImage":1810,"date":1816,"body":1817,"category":1062,"tags":1818},[1815],"Dr. Elle O'Brien","2020-12-01","Continuous integration (CI) is standard practice in software development for\nspeeding up development cycles, and for keeping them short and painless. 
CI\nmeans making small commits, often, and automating tests so every commit is a\nrelease candidate.\n\n\nWhen a project involves machine learning (ML), though, new challenges arise:\nTraditional [version control systems](/topics/version-control/) (like Git)\nthat are key to CI struggle to manage large datasets and models.\n\nFurthermore, typical pass-fail tests are too coarse for understanding ML\nmodel performance – you might need to consider how several metrics, like\naccuracy, sensitivity, and specificity, are affected by changes in your code\nor data.\n\nData visualizations like confusion matrices and loss plots are needed to\nmake sense of the high-dimensional and often unintuitive behavior of models.\n\n\n## Continuous machine learning: an introduction\n\n\n[Iterative.ai](https://iterative.ai), the team behind the popular open\nsource version control system for ML projects [DVC](https://dvc.org) (short\nfor Data Version Control),\n\nhas recently released another open source project called\n[CML](https://cml.dev), which stands for continuous machine learning.\n\nCML is our approach to adapting powerful CI systems like GitLab CI to common\ndata science and ML use cases, including:\n\n\n- Automatic model training\n\n- Automatic model and dataset testing\n\n- Transparent and rich reporting about models and datasets (with data viz\nand metrics) in a merge request (MR)\n\n\n## Your first continuous machine learning report\n\n\nCML helps you put tables, data viz, and even sample outputs from models into\ncomments on your MRs, so you can review datasets and models like code.\n\nLet's see how to produce a basic report – we'll train an ML model using\nGitLab CI, and then report a model metric and confusion matrix in our MR.\n\n\n![Confusion\nMatrix](https://about.gitlab.com/images/blogimages/cml_confusion_matrix.jpg){:\n.shadow.medium.center}\n\nConfusion matrix\n\n{: .note.text-center}\n\n\nTo make this report, our `.gitlab-ci.yml` contains the following 
workflow:\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n    - cml_run\n\ncml:\n    stage: cml_run\n    image: dvcorg/cml-py3:latest\n\n    script:\n        - pip3 install -r requirements.txt\n        - python train.py\n\n        - cat metrics.txt >> report.md\n        - echo >> report.md\n        - cml-publish confusion_matrix.png --md --title 'confusion-matrix' >> report.md\n        - cml-send-comment report.md\n\n```\n\n\nThe entire [project repository is available\nhere](https://gitlab.com/iterative.ai/cml-base-case/).\n\nThe steps consist of the following:\n\n\n- **Train**: This is a classic training step where we install requirements\n(like `pip` packages) and run the training script.\n\n- **Write a CML report**: Produced metrics are appended to a markdown\nreport.\n\n- **Publish a CML report**: CML publishes an image of the confusion matrix\nwith the embedded metrics to your GitLab MR.\n\n\nNow, when you and your teammates are deciding if your changes have had a\npositive effect on your modeling goals,\n\nyou have a dashboard of sorts to review. Plus, this report is linked by Git\nto your exact project version (data and code) and the runner used for\ntraining and the logs from that run.\n\n\nThis is the simplest use case for achieving continuous machine learning with\nCML and GitLab. 
In the next section we'll look at a more complex use case.\n\n\n## CML with DVC for data version control\n\n\nIn machine learning projects, you need to track changes in your datasets as\nwell as changes in your code.\n\nSince Git is frequently a poor fit for managing large files, we can use\n[DVC](https://dvc.org) to link remote datasets to your CI system.\n\n\n```\n\n# .gitlab-ci.yml\n\nstages:\n  - cml_run\n\ncml:\n  stage: cml_run\n  image: dvcorg/cml-py3:latest\n  script:\n    - dvc pull data\n\n    - pip install -r requirements.txt\n    - dvc repro\n\n    # Compare metrics to master\n    - git fetch --prune\n    - dvc metrics diff --show-md master >> report.md\n    - echo >> report.md\n\n    # Visualize loss function diff\n    - dvc plots diff\n      --target loss.csv --show-vega master > vega.json\n    - vl2png vega.json | cml-publish --md >> report.md\n    - cml-send-comment report.md\n```\n\n\nThe entire [project is available\nhere](https://gitlab.com/iterative.ai/cml-dvc-case).\n\nIn this workflow, we have additional steps that use DVC to pull a training\ndataset, run an experiment, and then use CML to publish the report in your\nMR.\n\n\n![CML with DVC](https://about.gitlab.com/images/blogimages/cml_dvc.jpg){:\n.shadow.medium.center}\n\nCML with DVC\n\n{: .note.text-center}\n\n\nFor more details about ML data versioning and tracking, check out the [DVC\ndocumentation](https://dvc.org/doc).\n\n\n## Summary\n\n\nWe made CML to adapt CI to machine learning, so data science teams can enjoy\nbenefits such as:\n\n\n- Your code, data, models, and training infrastructure (hardware and\nsoftware environment) will be Git versioned.\n\n- You’re automating work, testing frequently, and getting fast feedback\n(with visual reports if you use CML). In the long run, this will almost\ncertainly speed up your project’s development.\n\n- CI systems make your work visible to everyone on your team. 
No one has to\nsearch very hard to find the code, data, and model from your best run.\n\n\n### About the guest author\n\n\n_Dr. Elle O'Brien is a Ph.D data scientist at iterative.ai and co-creator of\n[CML](https://cml.dev) project. She is also a lecturer at\n[UMSI](https://www.si.umich.edu/)._\n",[9,268,232,827,851],{"slug":1820,"featured":6,"template":700},"continuous-machine-learning-development-with-gitlab-ci","content:en-us:blog:continuous-machine-learning-development-with-gitlab-ci.yml","Continuous Machine Learning Development With Gitlab Ci","en-us/blog/continuous-machine-learning-development-with-gitlab-ci.yml","en-us/blog/continuous-machine-learning-development-with-gitlab-ci",{"_path":1826,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1827,"content":1833,"config":1839,"_id":1841,"_type":14,"title":1842,"_source":16,"_file":1843,"_stem":1844,"_extension":19},"/en-us/blog/cross-project-pipeline",{"title":1828,"description":1829,"ogTitle":1828,"ogDescription":1829,"noIndex":6,"ogImage":1830,"ogUrl":1831,"ogSiteName":685,"ogType":686,"canonicalUrls":1831,"schema":1832},"How to trigger multiple pipelines using GitLab CI/CD","Discover how to trigger and visualize pipelines when you set up GitLab CI/CD across multiple projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666903/Blog/Hero%20Images/pipeline.jpg","https://about.gitlab.com/blog/cross-project-pipeline","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to trigger multiple pipelines using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":1828,"description":1829,"authors":1834,"heroImage":1830,"date":1836,"body":1837,"category":718,"tags":1838},[1835],"Itzik Gan Baruch","2019-07-24","\n[Continuous integration (CI)](/solutions/continuous-integration/) is the practice of [automating code 
building and testing](/topics/ci-cd/) before it is\nmerged into the master or default branch. This allows developers to merge code early and frequently, while\nmitigating the risk of introducing new bugs into the master source code repository.\n\nWhile CI verifies that new code won't break when integrated with other code in the same repo, having\nall tests pass on that repo is only the first step. After running CI on the code, it is important to\ndeploy and run tests in a live environment. Moving from [CI to continuous delivery and deployment (CD)](/solutions/continuous-integration/)\nis [the next step of DevOps maturity](/topics/devops/). Deploying and then testing again allows code in one project\nto be tested together with other components and services which may be managed in other projects.\n\n## Why do I need to verify that my code works with other components?\n\nA good example could be a\nmicroservices architecture. Usually, different [microservices](/topics/microservices/) are managed in\ndifferent [projects](https://docs.gitlab.com/ee/user/project/) – each microservice has its own\nrepository and own pipeline. It's also very common for different teams to be\nresponsible for different microservices and their pipeline configurations. As a developer you will\nwant to confirm that your code changes don't break functionality of the dependent microservices.\nTherefore, you will want to execute tests on those microservices in addition to your project tests.\n\n## The cross-project pipeline\n\nWhen running your [project pipeline](/topics/ci-cd/cicd-pipeline/), you also want to trigger cross-project or multi-project pipelines,\nwhich will eventually deploy and test the latest version of all dependent microservices. To\nachieve this goal you need an easy, flexible and convenient way to trigger other\npipelines as part of your project CI. 
GitLab CI/CD offers an easy way to run a cross-project\npipeline by simply adding a pipeline trigger job in the CI configuration file.\n\n## GitLab CI/CD configuration file\n\nIn GitLab CI/CD, pipelines, and their component jobs and stages, are defined in\nthe [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) file for each project. The\nfile is part of the project repository. It is fully versioned and developers can edit it with any\ncommon IDE of their choice. They do not have to ask the system admin or DevOps team to make\nchanges in the pipeline configuration as it is self-service. The `.gitlab-ci.yml` file defines the structure\nand order of the pipelines and determines what to execute\nusing [GitLab Runner](https://docs.gitlab.com/runner/) (the agent that runs the jobs), and what\ndecisions to make when specific conditions are encountered, like when a process succeeds or fails.\n\n## Add a cross-project pipeline triggering job\n\nSince GitLab 11.8, GitLab provides a new CI/CD configuration syntax for triggering cross-project\npipelines found in the [pipeline configuration file](https://docs.gitlab.com/ee/ci/yaml/).\nThe following code illustrates configuring a bridge job to trigger a downstream pipeline:\n\n```\n//job1 is a job in the upstream project\ndeploy:\n\tstage: Deploy\n\tscript: this is my script\n\n//job2 is a bridge job in the upstream project which triggers cross-project pipeline\nAndroid:\n\tstage: Trigger-cross-projects\n            trigger: mobile/android\n```\n\nIn the example above, as soon as the deploy job succeeds in the deploy stage, the Android\nbridge job is going to be started. The initial status of this job will be pending. GitLab will\ncreate a downstream pipeline in the mobile/android project and, as soon as the pipeline gets created,\nthe Android job will succeed. 
In this case mobile/android is a full path to that project.\n\nThe user who created the upstream pipeline needs to have access rights to the downstream\nproject (mobile/android in this case). If a downstream project cannot be found, or a user does not\nhave access rights to create a pipeline there, the Android job will be marked as failed.\n\n## Browse from upstream pipeline graphs to downstream\n\nGitLab CI/CD makes it possible to visualize the pipeline configuration. In the below illustration, the\nbuild, test, and deploy stages are parts of the upstream project. Once the deploy job succeeds, four\ncross-projects will be triggered in parallel and you will be able to browse to them by clicking on\none of the downstream jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/Cross-proj-img1.png){: .shadow.medium.center}\n\nIn the below illustration the Service – Finance downstream pipeline is visible. We can now scroll\nleft to the upstream pipeline, scroll right back to the downstream pipeline or select another\ndownstream pipeline.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img2.png){: .shadow.medium.center}\n\n## Specifying a downstream pipeline branch\n\nIt is possible to specify a branch name that a downstream pipeline will use:\n\n```\ntrigger:\n     project: mobile/android\n     branch: stable-11-2\n```\n\nUse a project keyword to specify the full path to a downstream project. Use a branch keyword to\nspecify a branch name. GitLab will use a commit that is currently on the HEAD of the branch\nwhen creating a downstream pipeline.\n\n## Passing variables to a downstream pipeline\n\nSometimes you might want to pass variables to a downstream pipeline. 
You can do that using\nthe variables keyword, just like you would when defining a regular job.\n\n```\nAndroid:\n           variable:\n\t     ENVIRONMENT: ‘This is the variable value for the downstream pipeline’\n           stage: Trigger-cross-projects\n           trigger: mobile/android\n```\nThe ENVIRONMENT variable will be passed to every job defined in a downstream pipeline. It will be\navailable as an environment variable when GitLab Runner picks a job.\n\n## Cross-project pipeline summary\n\nThe `.gitlab-ci.yml` file defines the order of the CI/CD stages, which jobs to execute, and at which\nconditions to run or skip a job's execution. Adding a 'bridge job' with the `trigger` keyword to\nthis file can be used to trigger cross-project pipelines. We can pass parameters to jobs in\ndownstream pipelines, and even define a branch that a downstream pipeline will use.\n\nPipelines can be complex structures with many sequential and parallel jobs, and as we just\nlearned, sometimes they can trigger downstream pipelines. To make it easier to understand the\nflow of a pipeline, including its downstream pipelines, GitLab has pipeline graphs for viewing\npipelines and each pipeline's status.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img4.png){: .shadow.medium.center}\n\nHey community, what else would you like me to explain in a blog post? 
Let me know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nCover image by [Tian Kuan](https://unsplash.com/@realaxer) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,721,695,874,875],{"slug":1840,"featured":6,"template":700},"cross-project-pipeline","content:en-us:blog:cross-project-pipeline.yml","Cross Project Pipeline","en-us/blog/cross-project-pipeline.yml","en-us/blog/cross-project-pipeline",{"_path":1846,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1847,"content":1853,"config":1859,"_id":1861,"_type":14,"title":1862,"_source":16,"_file":1863,"_stem":1864,"_extension":19},"/en-us/blog/data-driven-decision-making-with-sourcewarp",{"title":1848,"description":1849,"ogTitle":1848,"ogDescription":1849,"noIndex":6,"ogImage":1850,"ogUrl":1851,"ogSiteName":685,"ogType":686,"canonicalUrls":1851,"schema":1852},"SourceWarp: Make data-driven, agile DevSecOps decisions","How the SourceWarp approach and tool help make informed, agile decisions for CI/CD tools and DevSecOps platforms at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682686/Blog/Hero%20Images/velocity2.png","https://about.gitlab.com/blog/data-driven-decision-making-with-sourcewarp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"SourceWarp: Make data-driven, agile DevSecOps decisions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Vulnerability Research Team\"}],\n        \"datePublished\": \"2023-04-13\",\n      }",{"title":1848,"description":1849,"authors":1854,"heroImage":1850,"date":1856,"body":1857,"category":718,"tags":1858},[1855],"GitLab Vulnerability Research Team","2023-04-13","\n\nAt GitLab, we use different strategies to make assessments about the stability\nor robustness of a feature by means of best practices such as staging\nenvironments, feature flags, or canary testing. 
We also use testing\nstrategies such as [A/B testing](/handbook/marketing/digital-experience/engineering-ab-tests/)\nto assess how users react to feature variants. \n\nHowever, our short release cycles require testing and benchmarking approaches that\nmake it possible to prototype, test, and benchmark ideas quickly (ideally while\ndeveloping them). We need an approach that works on large code\nbases, can help assess a feature **before** deployment to staging or\nproduction, and provides data to support data-driven decision making.\n\nTo address this need, we developed the SourceWarp tool: a record-and-replay framework\nfor source code management systems. In this blog post, we will explain our motivation\nfor creating SourceWarp and explain how we use it to inform data-driven decision making within the GitLab platform.\n\n## Motivation: Data-driven decision making in the DevSecOps context\n\n[DevSecOps](/topics/devsecops/) streamlines software development by allowing teams to ship features quickly\nand providing short feedback cycles for customers. These short feedback cycles can be used to monitor the impact of\na feature from the time it is shipped and inform developers and product\nmanagers about the success or failure of a given deployment.\n\nGitLab, as a heterogeneous DevSecOps platform, acts as an integration point for\ndifferent [CI/CD tools](https://docs.gitlab.com/ee/ci/) that often contribute\nto user-facing functionality. For example, the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/),\nwhich displays all detected vulnerabilities, is visible\nas a single functionality, but the data in the report may come from a\nnumber of different tools in various pipelines. The DevSecOps\nplatform collects and stores results in the backend database and keeps track of user actions on the\nfindings (through the UI or the API). 
A large portion of the automation in the platform\nis built around or initiated by code changes where the\nsource code management system or Git respoitory basically holds the input data. In\norder to test and benchmark new features for these systems effectively, the\ntesting and benchmarking approach needs to have some source code awareness.\n\nWe can use SourceWarp to achieve this. Let's dive in to a real-world example\nof how we used SourceWarp to help make an informed decision about a product integration.\n\n## Case study: Advanced vulnerability tracking\n\nAs a DevSecOps platform, GitLab provides automation\ncentered around code changes, where the source code is stored in a source code\nmanagement system. SourceWarp uses a Git repository as input, which we use to\nsource test-input data to test and benchmark our newly developed feature.\n\nIn a record phase, SourceWarp extracts commits from the source history that are\nrelevant with respect to a given test criterion and generates a patch replay\nsequence. In the monitor phase, SourceWarp replays the generated sequence on a\ntarget system. These phases are executed while continuously monitoring the\nDevSecOps platform to collect metrics and to generate a report that provides\nthe testing and benchmarking results.\n\nWe used SourceWarp to test and benchmark [advanced vulnerability tracking](https://docs.gitlab.com/ee/user/application_security/sast/#advanced-vulnerability-tracking),\nwhich identifies and deduplicates vulnerabilities in a changing code base. 
In our\nbenchmarking and testing experiment, we let SourceWarp automatically sample patch\nsequences from a slice of GitLab's source code repository history (2020-10-31\nand 2020-12-31) and replay them on two target systems: One system had advanced\nvulnerability tracking enabled, and the other one was using our old\nvulnerability tracking approach.\n\nAfter the application of every patch from the\npatch sequence, SourceWarp collected metrics from the target system that\nrecorded the observed vulnerabilities. We observed that our vulnerability\ntracking approach was 30% more effective than traditional\nvulnerability tracking where `\u003Cfile, line number>` are used to identify the\nlocation of a vulnerabilty. This means that advanced vulnerabiilty tracking\nreduces the manual effort of auditing vulnerabilities by 30%.\n\nIn addition, we\nobserved that with an increasing number of source code changes, the deduplication\neffectiveness of vulnerability tracking increases. Looking at the relatively\nshort timeframe from 2020-10-31 to 2020-12-31, the deduplication effectivness\nincreased from 11% to 30%, which suggests that the effectiveness increases over\ntime as the source code evolves.\n\nSourceWarp performed this experiment in an automated and reproducible way, and\nprovided data that was helpful in making an informed decision about the product\nintegration of vulnerability tracking. \n\n## Where to find more SourceWarp information\n\nThe SourceWarp approach is detailed in our research paper, \"[SourceWarp](/resources/downloads/research-paper-ast2023-sourcewarp.pdf): A scalable, SCM-driven testing and benchmarking approach to support data-driven and agile decision making for CI/CD tools and DevSecOps platforms,\" which will be presented at the 4th ACM/IEEE International Conference on Automation of Software Test ([AST 2023](https://conf.researchr.org/home/ast-2023)). 
\n\nThe [SourceWarp testing and benchmarking tool](https://gitlab.com/gitlab-org/vulnerability-research/foss/sourcewarp) is implemented in Ruby and is open source (MIT license).\nThe `README.md` provides information about the tool setup and implementation.\nYou can also see it in action in the demo below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/-9lk_Jhuq14\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## Useful Links\n\n- [Vulnerability Research Team](/handbook/engineering/development/sec/secure/vulnerability-research/)\n- [SourceWarp tool](https://gitlab.com/gitlab-org/vulnerability-research/foss/sourcewarp)\n- [Recorded Demo](https://www.youtube.com/watch?v=-9lk_Jhuq14)\n- [AST 2023](https://conf.researchr.org/home/ast-2023)\n- [Research Paper](/resources/downloads/research-paper-ast2023-sourcewarp.pdf)\n\nCover image by [Jason Corey](https://unsplash.com/@jason_corey_) on [Unsplash](https://unsplash.com/photos/AT5vuPoi8vc)\n{: .note}\n",[696,9,697],{"slug":1860,"featured":6,"template":700},"data-driven-decision-making-with-sourcewarp","content:en-us:blog:data-driven-decision-making-with-sourcewarp.yml","Data Driven Decision Making With Sourcewarp","en-us/blog/data-driven-decision-making-with-sourcewarp.yml","en-us/blog/data-driven-decision-making-with-sourcewarp",{"_path":1866,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1867,"content":1873,"config":1879,"_id":1881,"_type":14,"title":1882,"_source":16,"_file":1883,"_stem":1884,"_extension":19},"/en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards",{"title":1868,"description":1869,"ogTitle":1868,"ogDescription":1869,"noIndex":6,"ogImage":1870,"ogUrl":1871,"ogSiteName":685,"ogType":686,"canonicalUrls":1871,"schema":1872},"Data-driven DevSecOps: Exploring GitLab Insights Dashboards","Learn how to leverage GitLab Insights Dashboards to visualize key 
metrics, track project progress, and boost team productivity with customizable, data-driven views.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097210/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750097210214.png","https://about.gitlab.com/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Data-driven DevSecOps: Exploring GitLab Insights Dashboards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ricardo Amarilla Villalba\"}],\n        \"datePublished\": \"2024-11-20\",\n      }",{"title":1868,"description":1869,"authors":1874,"heroImage":1870,"date":1876,"body":1877,"category":693,"tags":1878},[1875],"Ricardo Amarilla Villalba","2024-11-20","Metrics and analytics play a crucial role in driving productivity, quality,\nand success. GitLab, as a comprehensive DevSecOps platform, offers powerful\ntools for tracking and visualizing these vital metrics through its Insights\nDashboards. In this article, you'll learn how to use the Insights Dashboards\nin your environment.\n\n\n## Introduction to GitLab metrics and analytics \n\n\nGitLab provides an array of metrics and analytics tools that cover various\naspects of the DevSecOps lifecycle:\n\n\n1. [Productivity\nAnalytics](https://docs.gitlab.com/ee/user/analytics/productivity_analytics.html):\nTrack team velocity, cycle time, and lead time.  \n\n2. [Code Review\nAnalytics](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html):\nMeasure code quality, test coverage, and review efficiency.  \n\n3. [CI/CD\nAnalytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html):\nMonitor pipeline performance and deployment frequency.  \n\n4. 
[Value Stream\nAnalytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/):\nVisualize the flow of work from idea to production.  \n\n5. [Insights](https://docs.gitlab.com/ee/user/project/insights/): Explore\nand visualize data about your projects and groups.\n\n\nThese metrics offer invaluable insights into your development process,\nhelping teams identify bottlenecks, optimize workflows, and make data-driven\ndecisions.\n\n\n## Leveraging labels for specific metrics\n\n\nOne of GitLab's most powerful, yet understated features, is Labels, which\nallows you to filter and focus on specific metrics with pinpoint accuracy.\nBy strategically applying labels to issues, merge requests, and epics, you\ncan create custom views that provide targeted insights into your project's\nperformance and progress.\n\n\nLabels in GitLab act as versatile identifiers, allowing you to categorize\nand organize your work items with great flexibility. Whether you're tracking\nfeature development, bug fixes, or team-specific tasks, labels enable you to\nslice and dice your project data in ways that reveal meaningful patterns and\ntrends. This concept parallels the use of tags in cloud deployments, where\nresources are labeled for easier management, cost allocation, and\noperational insights.\n\n\nBy thoughtfully labeling your work items, you're essentially creating a\nsophisticated labeling system that can be leveraged to generate custom\ndashboards and reports. This approach empowers you to zoom in on the metrics\nthat matter most to your team or stakeholders, providing a clear and focused\nview of your project's health and momentum.\n\n\n## How to configure GitLab Insights\n\n\nGitLab Insights allow you to explore and visualize data about your projects\nand groups. They provide valuable analytics on various aspects such as\nissues created and closed during a specified period, average time for merge\nrequests to be merged, and triage hygiene. 
Insights can be configured for\nboth projects and groups.\n\n\nTo configure Insights:\n\n\n1. For project insights:  \n   * Create a file named `.gitlab/insights.yml` in the root directory of your project.  \n2. For group insights:  \n   * Create a `.gitlab/insights.yml` file in a project that belongs to your group.  \n   * Go to your group's **Settings > General**.  \n   * Expand the **Analytics section** and find the **Insights section**.  \n   * Select the project containing the configuration file and save changes.\n\nThe `.gitlab/insights.yml` file is a YAML file where you define the\nstructure and order of charts in a report, as well as the style of charts to\nbe displayed. Each chart definition includes parameters such as title,\ndescription, type, and query to specify the data source and filtering\nconditions.\n\n\nTo view insights, navigate to **Analyze > Insights** in your project or\ngroup.\n\n\n![View default Insights\nDashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097217972.png)\n\n\n## Customize merge request insights\n\n\nWhile the default view provides valuable raw information, we can customize\nthe Insights Dashboard to uncover additional layers of information, such as\nwhich team was responsible for each merge request and what type of problem\neach one solved.\n\n\n## Merge request insights for each squad and requirement type\n\n\nMeasuring squad productivity in GitLab can be challenging, especially when\nthe GitLab group and subgroup structure doesn't align perfectly with your\nsquad organization. Here's how to overcome these challenges and effectively\ntrack squad productivity:\n\n\n### **Setting up squad-based metrics**\n\n\n1. 
**Label creation:** Create unique scope labels for each squad (e.g.,\n`squad::alpha`, `squad::beta`) and each requirement type (e.g., `type::bug`,\n`type::feature`, `type::maintenance`).\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZUOzORIUJeU?si=T8eHeGizS3blYFHB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n2. **Label application:** Consistently apply these squad labels to all\nissues and merge requests handled by each squad, regardless of the project\nor group they're in.  \n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/fJ9entEBZG8?si=MlM6mKirEdkmwDDJ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n**Hints:**  \n   * Use GitLab API to apply labels massively to existing open, merged, and closed MRs.  \n   * Add/remove/update labels as part of your GitLab CI pipeline.  \n   * Leverage the GitLab Triage Bot to automate the labeling process.  \n\n3. 
Dashboard setup: Create a `.gitlab/insights.yml` file in your project\nrepository with custom charts for team-specific and type-specific merge\nrequest insights.\n\n\n```\n\n\n## Default Merge Requests insights.yml \n\nmergeRequests:\n  title: Merge requests dashboard\n  charts:\n    - title: Merge requests merged per week \n      type: bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month\n      type: bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: month\n          period_limit: 3\n\n## Per-teams Merge Requests insights.yml\n\nmergeRequestsTeams:\n  title: Merge requests dashboard per teams\n  charts:\n    - title: Merge requests merged per week \n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: week\n          period_limit: 12\n          collection_labels:\n            - squad::alpha\n            - squad::beta\n    - title: Merge requests merged per month\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: month\n          period_limit: 3\n          collection_labels:\n            - squad::alpha\n            - squad::beta\n\n## Per-teams and Type Merge Requests insights.yml\n\nmergeRequestsTeamsAndType:\n  title: Per Teams and Type - Merge requests dashboard\n  charts:\n    - title: Merge requests merged per week - Squad Alpha\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          
filter_labels: squad::alpha\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month - Squad Alpha\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::alpha\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: month\n          period_limit: 3\n    - title: Merge requests merged per week - Squad Beta\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::beta\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month - Squad Beta\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::beta\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: month\n          period_limit: 3\n\n```\n\n\nBy implementing these customizations, you can create insightful dashboards\nthat provide a clear view of merge request activity per team and requirement\ntype, allowing you to visualize trends over time, compare performance\nbetween squads, and analyze the distribution of different types of work for\neach squad. 
\n\n\n![dashboards with view of MR activity per team and requirement\ntype](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097217972.png)\n\n\n![dashboard comparing performance between\nsquads](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097217974.png)\n\n\n## Get started today\n\n\nGitLab Insights is just the tip of the iceberg when it comes to metrics and\nanalytics. To explore the full range of GitLab's powerful analytics\nfeatures, including Value Stream Analytics, CI/CD Analytics, and Code Review\nmetrics, check out our Value Stream Management product tour:\n\n\n[![Value Stream Management product\ntour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-11-20_at_12.28.08_PM_aHR0cHM6_1750097217976.png)](https://gitlab.navattic.com/vsm)\n\n\n> Ready to start your own metrics journey? 
Sign up for a [free trial\nof GitLab Ultimate\ntoday](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F)\nand unlock the full potential of data-driven DevSecOps.\n\n\n## Read more\n\n- [Scheduled Reports Generation tool simplifies value stream\nmanagement](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)\n\n- [Getting started with the new GitLab Value Streams\nDashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n- [AI Impact analytics dashboard measures the ROI of\nAI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n",[9,495,693,695,917,1127],{"slug":1880,"featured":91,"template":700},"data-driven-devsecops-exploring-gitlab-insights-dashboards","content:en-us:blog:data-driven-devsecops-exploring-gitlab-insights-dashboards.yml","Data Driven Devsecops Exploring Gitlab Insights Dashboards","en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards.yml","en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards",{"_path":1886,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1887,"content":1893,"config":1899,"_id":1901,"_type":14,"title":1902,"_source":16,"_file":1903,"_stem":1904,"_extension":19},"/en-us/blog/defend-cicd-security",{"title":1888,"description":1889,"ogTitle":1888,"ogDescription":1889,"noIndex":6,"ogImage":1890,"ogUrl":1891,"ogSiteName":685,"ogType":686,"canonicalUrls":1891,"schema":1892},"Defending the CI/CD pipeline","Speed to launch often comes at the cost of security – but it doesn’t have to. 
Here are four ways to achieve both by using a CI/CD pipeline","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678499/Blog/Hero%20Images/defend-cicd-security.jpg","https://about.gitlab.com/blog/defend-cicd-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Defending the CI/CD pipeline\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-11-19\",\n      }",{"title":1888,"description":1889,"authors":1894,"heroImage":1890,"date":1896,"body":1897,"category":1040,"tags":1898},[1895],"Vanessa Wegner","2019-11-19","\n[CI/CD](/topics/ci-cd/) is a way to release software as quickly as possible, which, unfortunately, often comes at the expense of security. [Synopsys and \n451 Research found](https://www.synopsys.com/blogs/software-security/security-challenges-cicd-workflows/) \nthe most significant [application security](/topics/devsecops/) challenges in CI/CD workflows \ninclude a lack of automated, integrated security testing tools, inconsistent \nmethods, slowed workflows, and too many false positives.\n\nThere’s also the challenge of securing the pipeline itself. Traditional and \nmanual security practices can’t scale to the level of CI/CD – the resulting delivery pipelines expand a company’s attack surface by a significant measure. The pipeline represents an end-to-end lifecycle for your software which makes it a \nprime target for hackers. It's clear [CI/CD security](/solutions/security-compliance/) can’t be an afterthought. DevOps teams \nmust bring security issues to the forefront of their considerations throughout the SDLC. \n\n## Security risks in enterprise CI/CD\n\nCI/CD significantly broadens your attack surface with a lengthy list of \ncomponents – repositories, servers, containers, and for those who don’t use \nGitLab, a wide array of tools. 
A large number of moving pieces presents a \ntempting ROI for hackers – one compromised segment of the ecosystem could open \nup the entire infrastructure for exploitation. [As tech journalist Twain Taylor \nexplains](https://thenewstack.io/the-biggest-security-risks-lurking-in-your-ci-cd-pipeline/), \nsecuring the CI/CD pipeline is not a straightforward process. Teams need to study the \npipeline, understand what information the pipeline ingests, uncover any major \nvulnerabilities and find ways to eliminate those risks.\n\nAlso, tools that lack transparency, require frequent switching \nbetween platforms, and inhibit the overall workflow are less likely to be \nadopted – and more likely to be worked around. Workarounds can create friction in the pipeline which can mean inconsistent \ntesting and remediation, all of which can allow more vulnerabilities to make their way \nthrough to production and launch.\n\n## Defending against CI/CD pipeline risks\n\nSecure CI/CD can be achieved through [DevSecOps](/topics/devsecops/) but you’ll need a mature CI/CD solution to get you there. In addition to the \nstability of the solution, your lifecycle ecosystem must be well-maintained and \neasily monitored for suspicious activity. Four of the most important aspects of \na secure CI/CD pipeline are automation, access management, positive user \nexperience, and transparency.\n\n### Automation\n\nAutomation, at the very least, should allow you to bring your security \npractices (especially [testing](/stages-devops-lifecycle/application-security-testing/)) \nup to the speed and scale of CI/CD. The value of automation magnifies when \nprocesses are standardized across teams and organizations. By introducing \nrepeatability to your projects, you’re also creating expected functionality and operations within your pipeline. 
When there are behaviors \nor activities that don’t align to the expected, a red flag will be triggered alerting developers to potential threats.\n\n### Access management\n\nAccess rights should be considered for both human-to-tool and tool-to-tool \ninteractions. [Tripwire recommends](https://www.tripwire.com/state-of-security/devops/security-ci-cd-pipeline-flowing/) \nrequiring authentication for anyone to push changes to the pipeline, \nimplementing login tracking, and confirming that builds reside on secure \nservers. \n\nCommunication between tools and components should be carefully managed \nto ensure that access is only granted on an as-needed basis. The New Stack's Twain also notes it’s important to consider what secrets are contained in pipeline scripts. He recommends removing any keys, credentials, and secrets from scripts and \nprotecting them with trusted secrets managers. He also suggests implementing \naccess control across your entire toolchain to revoke anything anonymous or shared, and to regularly audit the controls across the \necosystem. \n\n### User experience\n\nSeamless integration between tools will make a night-and-day difference in \nsecuring your CI/CD pipeline (alternatively, you could also use [a single tool \nfor the entire lifecycle]/handbook/product/single-application/)). \nEven though security is gaining traction in the minds of non-security \nprofessionals, it still remains a challenge for many development teams. Provide \ndevelopers with tools and practices that are standard across the organization, \nand reduce friction between tools as much as possible. \n\nWith lower barriers to \nadoption, your team will be less likely to create workarounds that could \njeopardize your business or customers. Providing users with immediate \nfeedback on the security of their code will enable them to remediate on the \nspot and serve an educational purpose, showing developers what to watch out \nfor when writing code. 
\n\n### Transparency\n\nIt's vital to have a view into what happens throughout the CI/CD pipeline. Maintain a single source of truth that logs every change – \nas well as its origin – and include functionality that allows sign-off for any \nhigh-stakes updates. Transparency also builds accountability among team members, \nreenforcing the idea that everyone is responsible for security. Lastly, \ntransparency is crucial to your team communication strategy. Methodologies and \nknowledge should be communicated openly and thoroughly, so that everyone on the \nteam understands how to apply best practices and what the intended outcomes are.\n\n## Speed and security: No longer a paradox\n\nEach of the above steps will help your security efforts shift left in the \nSDLC. Moving it all earlier in the process will enable you to release secure, quality software at the \nspeed of the business. This can only happen if there is true collaboration between development, operations, \nand security. Set policies and standard practices, understand respective \ngoals, and foster a culture of responsibility for the software as a \nwhole – and not just one facet of its creation or performance.\n\n## The security benefits of a single CI/CD tool for the entire lifecycle\n\nIt’s extremely important to use established tools that have been thoroughly \nvetted by both your internal teams and the market at large. That being said, \nfinding the best-in-class tools for every phase of the lifecycle and then \nsuccessfully (and securely) stringing them together can be a nightmare and result in untold technical debt. A single CI/CD tool relieves much of \nthat burden, by eliminating unnecessary platform switching and enabling high \ntransparency throughout the pipeline. 
With GitLab in particular, security \nchecks are embedded within the development workflow, which both reduces \nfriction for developers and provides a single source of truth for the entire \npipeline.\n\nRegardless of your tool (or tools) of choice, it’s critical that you and your \nteam prioritize security in all aspects of work.\n\nCover image by [Boban Simonovski](https://unsplash.com/@3031n) on [Unsplash](https://unsplash.com/photos/akQ06aB6MfM)\n{: .note}\n",[9,721,697],{"slug":1900,"featured":6,"template":700},"defend-cicd-security","content:en-us:blog:defend-cicd-security.yml","Defend Cicd Security","en-us/blog/defend-cicd-security.yml","en-us/blog/defend-cicd-security",{"_path":1906,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1907,"content":1913,"config":1920,"_id":1922,"_type":14,"title":1923,"_source":16,"_file":1924,"_stem":1925,"_extension":19},"/en-us/blog/demystifying-ci-cd-variables",{"title":1908,"description":1909,"ogTitle":1908,"ogDescription":1909,"noIndex":6,"ogImage":1910,"ogUrl":1911,"ogSiteName":685,"ogType":686,"canonicalUrls":1911,"schema":1912},"GitLab environment variables demystified","CI/CD variables are useful (and flexible) tools to control jobs and pipelines. 
We unpack everything you need to know about GitLab environment variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664679/Blog/Hero%20Images/blog-image-template-1800x945__24_.png","https://about.gitlab.com/blog/demystifying-ci-cd-variables","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab environment variables demystified\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-04-09\",\n      }",{"title":1908,"description":1909,"authors":1914,"heroImage":1910,"date":1916,"body":1917,"category":718,"tags":1918,"updatedDate":1919},[1915],"Veethika Mishra","2021-04-09","There is a lot of flexibility when it comes to defining and using variables\nfor [CI/CD](https://about.gitlab.com/topics/ci-cd/). Variables are extremely\nuseful for controlling jobs and pipelines, and they help you avoid\nhard-coding values in your `.gitlab-ci.yml` configuration file. The\ninformation in this post should weave a larger picture by bringing together\nall (or most) of the information around defining and handling variables,\nmaking it easier to understand the scope and capabilities. Relevant\ndocumentation is linked throughout the post.\n\n\nIn [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), variables can be used to\ncustomize jobs by defining and storing values. When using variables there is\nno need to hard code values. In GitLab, CI/CD variables can be defined by\ngoing to **Settings >> CI/CD >> Variables**, or by simply defining them in\nthe `.gitlab-ci.yml` file.\n\n\nVariables are useful for configuring third-party services for different\ndeployment environments, such as `testing`, `staging`, `production`, etc.\nModify the services attached to those environments by simply changing the\nvariable that points to the API endpoint the services need to use. 
Also use\nvariables to configure jobs and then make them available as environment\nvariables within the jobs when they run.\n\n\n![GitLab reads the .gitlab-ci.yml file to scan the referenced variable and\nsends the information to the GitLab Runner. The variables are exposed on and\noutput by the\nrunner.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_processing.jpeg)\n\n\n## The relationship between variables and environments\n\n\nSoftware development as a process includes stages to test a product before\nrolling it out to users.\n[Environments](https://docs.gitlab.com/ee/ci/environments/) are used to\ndefine what those stages look like and it may differ between teams and\norganizations.\n\n\nOn the other hand, variables are data values that are likely to change as a\nresult of user interaction with a product. For example, their age,\npreference, or any input you could possibly think of that might determine\ntheir next step in the product task-flow.\n\n\nWe often hear the term [environment\nvariable](https://docs.gitlab.com/ee/administration/environment_variables.html).\nThese are variables that are defined in a given environment, but outside the\napplication. GitLab CI/CD variables provide developers with the ability to\nconfigure values in their code. Using variables is helpful because it\nensures that the code is flexible. GitLab CI/CD variables allow users to\nmodify an application deployed to a certain environment without making any\nchange to code. 
It is simple to run tests or even integrate third-party\nservices by changing a configuration environment variable outside the\napplication.\n\n\n## The scope of variables for CI/CD\n\n\n![Order of precedence for CI/CD variables: 1) Manual pipeline run, trigger\nand schedule pipeline variables, 2) Project level, group level, instance\nlevel protected variables, 3) Inherited CI/CD variables, 4) Job level,\nglobal yml defined variables, 5) Deployment variables, 6) Pre-defined CI/CD\nvariables](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_precedence.jpeg)\n\n\n### `.gitlab-ci.yml` defined variables\n\n\nVariables that need to be available in the job environment can be added to\nGitLab. These CI/CD variables are meant to store non-sensitive project\nconfiguration, like the database URL in the `.gitlab-ci.yml` file. Reuse\nthis variable in multiple jobs or scripts, wherever the value is needed. If\nthe value changes, you only need to update the variable once, and the change\nis reflected everywhere the variable is used.\n\n\n### Project CI/CD variables\n\n\nMoving a step above the repository-specific requirements, you can define\nCI/CD variables in [project\nsettings](https://docs.gitlab.com/ee/ci/variables/#for-a-project), which\nmakes them available to CI/CD pipelines. These are stored out of the\nrepository (not in the `.gitlab-ci.yml` file), but are still available to\nuse in the CI/CD configuration and scripts. Storing the variables outside\nthe `.gitlab-ci.yml` file keeps these values limited to a project-only\nscope, and not saved in plain text in the project.\n\n\n### Group and instance CI/CD variables\n\n\nSome variables are relevant at the group level, or even instance level, and\ncould be useful to all projects in a group or instance. 
Define the variables\nin the [group or instance\nsettings](https://docs.gitlab.com/ee/ci/variables/#for-a-group) so all\nprojects within those scopes can use the variables without actually needing\nto know the value  or having to create the variables for the lower scope.\nFor example, a common value that needs to be updated in multiple projects\ncan be easily managed if it stays up-to-date in a single place.\nAlternatively, multiple projects could use a specific password without\nactually needing to know the value of the password itself.\n\n\n## Jobs and pipelines as environments\n\n\nGitLab CI/CD variables, besides being used as environment variables, also\nwork in the scope of the `.gitlab-ci.yml` configuration file to configure\npipeline behavior, unrelated to any environment. The variables can be stored\nin the project/group/instance settings and be made available to jobs in\npipelines.\n\n\nFor example:\n\n\n```  \n\njob:  \n  rules:  \n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n  script:  \n  - echo \"This job ran on the $CI_COMMIT_BRANCH branch.\"  \n```\n\n\nThe variable `($CI_COMMIT_BRANCH)` in the script section runs in the scope\nof the job in which it was defined. This scope is the \"job environment\" –\nmeaning, when the job starts, the GitLab runner starts up a Docker container\nand runs the job in that environment. The runner will make that variable\n(and all other predefined or custom variables) available to the job, and it\ncan display their value in the log output if needed.\n\n\nBut the variable is **also** used in the `if:` section to determine when the\njob should run. That in itself is not an environment, which is why we call\nthese CI/CD variables. 
They can be used to dynamically configure your CI/CD\njobs, **as well** as be used as environment variables when the job is\nrunning.\n\n\n## Predefined variables\n\n\nA number of variables are\n[predefined](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nwhen a GitLab CI/CD pipeline starts. A user can immediately access values\nfor things like commit, project, or pipeline details without needing to\ndefine the variables themselves.\n\n\n## Custom CI/CD variables\n\n\n![Runners can create two kinds of custom CI/CD variables: Type and\nFile.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variable_types.jpeg)\n\n\nWhen creating a CI/CD variable in the settings, GitLab gives the user more\nconfiguration options for the variable. Use these extra configuration\noptions for stricter control over more sensitive variables:\n\n\n**Environment scope:** If a variable only ever needs to be used in one\nspecific environment, set it to only ever be available in that environment.\nFor example, you can set a deploy token to only be available in the\n`production` environment.\n\n\n**Protected variables:** Similar to the environment scope, you can set a\nvariable to be available only when the pipeline runs on a protected branch,\nlike your default branch.\n\n\n**Variable type:** A few applications require configuration to be passed to\nit in the form of a file. If a user has an application that requires this\nconfiguration, just set the type of variable as a \"File\". Configuring the\nCI/CD variable this way means that when the runner makes the variable\navailable in the environment, it actually writes it out to a temporary file,\nand stores the path to the file as the value. Next, a user can pass the path\nto the file to any applications that need it.\n\n\nAlong with the listed ways of defining and using variables, GitLab\nintroduced a feature that generates pre-filled variables when there's a need\nto run a pipeline manually. 
Prefilled variables reduce the chances of\nrunning into an error and makes running the pipeline easier.\n\n\n**Masked variables:** [Masked\nvariables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable)\nare CI variables that have been **hidden in job logs** to prevent the\nvariable’s value from being displayed. \n\n\n**Masked and hidden variables:** Introduced in [GitLab\n17.4](https://about.gitlab.com/releases/2024/09/19/gitlab-17-4-released/#hide-cicd-variable-values-in-the-ui),\n[Masked and\nhidden](https://docs.gitlab.com/ee/ci/variables/#hide-a-cicd-variable)\nvariables provide the same masking feature from job logs and **keep the\nvalue hidden** **in the Settings UI**. We do not recommend using either of\nthese variables for sensitive data (e.g. secrets) as they can be\ninadvertently exposed. \n\n\n## Secrets\n\n\nA secret is a sensitive credential that should be kept confidential.\nExamples of a secret include:\n\n\n* Passwords  \n\n* SSH keys  \n\n* Access tokens  \n\n* Any other types of credentials where exposure would be harmful to an\norganization\n\n\nGitLab currently enables its users to [use external secrets in\nCI](https://docs.gitlab.com/ee/ci/secrets/), by leveraging HashiCorp Vault,\nGoogle Cloud Secret Manager, and Azure Key Vault to securely manage keys,\ntokens, and other secrets at the project level. This allows users to\nseparate these secrets from other CI/CD variables for security reasons.\n\n\n### GitLab Secrets Manager\n\n\nBesides providing support for external secrets in CI, GitLab is also working\non introducing a [native solution to secrets\nmanagement](https://gitlab.com/groups/gitlab-org/-/epics/10108) to securely\nand conveniently store secrets within GitLab. This solution will also help\ncustomers use the stored secrets in GitLab specific components and\nenvironments, and easily manage access at namespace groups and projects\nlevel. 
\n\n\n## Read more\n\n* [GitLab native secrets manager to give software supply chain security a\nboost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/)\n\n\n***Disclaimer:** This blog contains information related to upcoming\nproducts, features, and functionality. It is important to note that the\ninformation in this blog post is for informational purposes only. Please do\nnot rely on this information for purchasing or planning purposes. As with\nall projects, the items mentioned in this blog and linked pages are subject\nto change or delay. The development, release, and timing of any products,\nfeatures, or functionality remain at the sole discretion of GitLab.*\n",[786,695,1064,785,9,917],"2025-01-13",{"slug":1921,"featured":6,"template":700},"demystifying-ci-cd-variables","content:en-us:blog:demystifying-ci-cd-variables.yml","Demystifying Ci Cd Variables","en-us/blog/demystifying-ci-cd-variables.yml","en-us/blog/demystifying-ci-cd-variables",{"_path":1927,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1928,"content":1934,"config":1940,"_id":1942,"_type":14,"title":1943,"_source":16,"_file":1944,"_stem":1945,"_extension":19},"/en-us/blog/dependency-proxy-updates",{"title":1929,"description":1930,"ogTitle":1929,"ogDescription":1930,"noIndex":6,"ogImage":1931,"ogUrl":1932,"ogSiteName":685,"ogType":686,"canonicalUrls":1932,"schema":1933},"Using the Dependency Proxy to improve your pipelines","The Dependency Proxy helps make pipelines faster and mitigates Docker Hub rate limits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681815/Blog/Hero%20Images/dependency_proxy_header.jpg","https://about.gitlab.com/blog/dependency-proxy-updates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using the Dependency Proxy to improve your pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve 
Abrams\"}],\n        \"datePublished\": \"2020-12-15\",\n      }",{"title":1929,"description":1930,"authors":1935,"heroImage":1931,"date":1937,"body":1938,"category":978,"tags":1939},[1936],"Steve Abrams","2020-12-15","{::options parse_block_html=\"true\" /}\n\n\n\n\nHi! I'm Steve, a backend engineer at GitLab. I work on the Package stage,\nwhich includes the Dependency Proxy.\n\n\nIn versions 13.6 and 13.7, we improved the [Dependency\nProxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/) so it's\nno longer an [MVC\nfeature](https://handbook.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc).\nBefore, the Dependency Proxy was only available to paid users who may have\nbeen wary to use it, because they did not want to be forced to use a public\ngroup. Now the Dependency Proxy is a robust free feature that can really\nprovide value for free and paid users alike.\n\n\nIf you haven't tried the feature out before, now is a great time to take a\nlook. If you have previously tried the Dependency Proxy and found it was not\nquite the solution you were looking for, I invite you to take a look at the\nnew functionality detailed here. The Dependency Proxy is more available,\nmore secure, and easier to use than ever. 
These updates also come right as\nDocker Hub has rolled out rate limits on image pulls, which the Dependency\nProxy can help alleviate.\n\n\nYou can also watch a demo of most of these features in this video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Nc4nUo7Pq08\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Move to Core\n\n\nIn 13.6, we moved the [Dependency Proxy to\nCore](/releases/2020/11/22/gitlab-13-6-released/#the-dependency-proxy-is-now-open-source).\nThe ability to speed up pipelines and create a safety net behind Docker Hub\nseemed like functionality that everyone should benefit from.\n\n\n## Support for private groups\n\n\nStarting in 13.7, you can now use the Dependency Proxy with all groups. Each\ngroup and subgroup can have its own space to cache images.\n\n\n![Dependency Proxy\ninterface](https://about.gitlab.com/images/blogimages/dependency_proxy_interface.png)\n\n\n## Authentication\n\n\n[Authentication](https://docs.gitlab.com/ee/user/packages/dependency_proxy/#authenticate-with-the-dependency-proxy)\nis also new in 13.7. 
If you had previously used the Dependency Proxy, you\nwill need to update your CI scripts or workflow to make sure that you are\nnow logged in.\n\n\nAuthentication was not only necessary to enable the ability to support\nprivate groups with the Dependency Proxy, but it's also a security upgrade.\nThe Dependency Proxy caches image data in your group's storage, so without\nauthentication, public groups could easily be abused to store images that\nyour group might not even be using.\n\n\n### How does it work\n\n\nThe Dependency Proxy is a proxy, so from the perspective of the Docker\nclient, it is just another registry to authenticate with:\n\n\n```shell\n\ndocker login --username stanley --password tanuki gitlab.com\n\n```\n\n\nWhen Docker makes a request to a registry it first asks:\n\n\n```shell\n\nGET gitlab.com/v2 # are you a registry?\n\n```\n\n\nTo which GitLab responds:\n\n\n```shell\n\n401 Unauthorized\n\n\nWWW-Authenticate: Bearer realm=https://gitlab.com/auth/jwt,\nservice=dependency_proxy\n\n# Yes! But you have to get permission to access me.\n\n# Please request a token from this other URL first.\n\n```\n\n\nThen Docker requests a token using the username and password you supplied,\nand if things check out, GitLab returns a JWT. Docker uses it to make its\nnext request, which in the case of the Dependency Proxy is the image pull.\nIf things don't check out, you'll likely see a `403 Forbidden` error.\n\n\n## Docker Hub rate limiting\n\n\nIn November 2020, Docker Hub began rate limiting image pulls. The Dependency\nProxy was already caching the image layers (blobs), so it made sense that\n[the Dependency Proxy should help mitigate this problem for\nusers](https://docs.gitlab.com/ee/user/packages/dependency_proxy/#docker-hub-rate-limits-and-the-dependency-proxy).\n\n\nIt is not uncommon for a project's pipeline to run every time a user pushes\na commit. In an active project or group, this could happen many times in an\nhour. 
If your CI script starts with something as simple as:\n\n\n```yaml\n\nimage: node:latest\n\n```\n\n\nEvery time your pipeline runs, even though you are using the same image\nevery time, Docker will count an additional image pull against your account.\n\n\nAn image consists of many different files, and a `docker pull` command will\nmake several requests. So what counts as one image pull?\n\n\nThere are two types of files that make up an image. First is the manifest.\nYou can think of it as a table of contents for an image. It contains\ninformation about what layers, or blobs, the image is made of. Once the\nDocker client has received the manifest, it will make a request for each\nblob described in the manifest.\n\n\nDocker uses the [manifest requests to count the image\npulls](https://docs.docker.com/docker-hub/download-rate-limit/). This means\nthat if the Dependency Proxy is going to help mitigate the rate limiting, it\nneeds to store the manifest in addition to the blobs. This presents a small\nproblem: a manifest is usually requested by tag name, which is a mutable\nreference. If I request `node:latest` this week, it might be different than\nthe `node:latest` I requested last week. Each manifest contains a digest, or\nhash signature, that can be used to tell if it has changed. You can see this\ndigest when you pull the image:\n\n\n```shell\n\n$ docker pull alpine:latest\n\n\nlatest: Pulling from library/alpine\n\nDigest:\nsha256:a126728cb7db157f0deb377bcba3c5e473e612d7bafc27f6bb4e5e083f9f08c2\n\nStatus: Image is up to date for alpine:latest\n\ndocker.io/library/alpine:latest\n\n```\n\n\nDocker has allowed HEAD requests to be made for a manifest for free. The\nHEAD request response contains the digest of the underlying manifest. 
So we\ncan make a HEAD request to determine if the manifest we have cached in the\nDependency Proxy is up to date.\n\n\n```shell\n\ncurl --head -H \"Authorization: Bearer $TOKEN\"\nhttps://registry-1.docker.io/v2/library/alpine/manifests/latest\n\n\nHTTP/1.1 200 OK\n\nContent-Length: 2782\n\nContent-Type: application/vnd.docker.distribution.manifest.v1+prettyjws\n\nDocker-Content-Digest:\nsha256:a126728cb7db157f0deb377bcba3c5e473e612d7bafc27f6bb4e5e083f9f08c2\n\nDocker-Distribution-Api-Version: registry/2.0\n\nEtag:\n\"sha256:a126728cb7db157f0deb377bcba3c5e473e612d7bafc27f6bb4e5e083f9f08c2\"\n\nDate: Wed, 15 Dec 2020 03:34:24 GMT\n\nStrict-Transport-Security: max-age=31536000\n\nRateLimit-Limit: 100;w=21600\n\nRateLimit-Remaining: 72;w=21600\n\n```\n\n\nThe response even contains information telling us how many requests we have\nremaining within our rate limit. In this example, we see we have 72 out of\n100 remaining.\n\n\nWhen the Dependency Proxy first receives a request for the manifest, it\ndecides whether or not it needs to pull an image from Docker Hub based on a\nfew rules:\n\n\n![Dependency Proxy manifest\ncaching](https://about.gitlab.com/images/blogimages/dependency_proxy_flow_chart.png)\n\n\nThe really great thing about the Dependency Proxy is that you don't have to\ndo anything special to take advantage of these abilities. If you simply\nupdate your CI script with your Dependency Proxy image prefix to something\nlike:\n\n\n```yaml\n\nimage:\ngitlab.com/super-awesome-group/dependency_proxy/containers/node:latest\n\n```\n\n\nThen you will automatically bypass Docker Hub rate limiting and your cache\nwill have the most up-to-date version of each image tag.\n\n\n## CI/CD\n\n\nThe Dependency Proxy makes the most sense as a compliment to CI/CD\npipelines. 
Rather than pulling directly from Docker Hub, you can use the\nDependency Proxy to speed up your pipelines, avoid rate limiting, and create\nsecurity in case of an upstream outage.\n\n\nAs of 13.9, runners log in to the Dependency Proxy automatically, so you\ndon't need to explicitly log in unless you want to for reasons like using\nspecific tokens.\n\n\nTo make the Dependency Proxy easier to use, we have added a few predefined\nenvironment variables you can use in your `.gitlab-ci.yml` files.\n\n\n- `CI_DEPENDENCY_PROXY_USER`: A CI user for logging in to the Dependency\nProxy.\n\n- `CI_DEPENDENCY_PROXY_PASSWORD`: A CI password for logging in to the\nDependency Proxy.\n\n- `CI_DEPENDENCY_PROXY_SERVER`: The server for logging in to the Dependency\nProxy.\n\n- `CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX`: The image prefix for pulling\nimages through the Dependency Proxy. This pulls through the top-level group.\n\n- `CI_DEPENDENCY_PROXY_DIRECT_GROUP_IMAGE_PREFIX` (starting in version\n14.3): An alternative image prefix for pulling images through the Dependency\nProxy. This pulls through the subgroup, or direct group the project exists\nin.\n\n\nDepending on how your scripts and pipelines look you can use these variables\nin a variety of ways. 
If you are manually pulling images in the script using\n`docker pull`, you can log in and pull like this:\n\n\n```yaml\n\n# .gitlab-ci.yml\n\n\ndependency-proxy-pull-master:\n  # Official docker image.\n  image: docker:latest\n  stage: build\n  services:\n    - docker:dind\n  before_script:\n    - docker login -u \"$CI_DEPENDENCY_PROXY_USER\" -p \"$CI_DEPENDENCY_PROXY_PASSWORD\" \"$CI_DEPENDENCY_PROXY_SERVER\"\n  script:\n    - docker pull \"$CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX\"/alpine:latest\n```\n\n\nIf you want to use the Dependency Proxy to pull images defined as `image`\nyaml attributes (the base images of the script), you can [create a custom\nenvironment\nvariable](https://docs.gitlab.com/ee/ci/variables/#custom-cicd-variables)\nnamed `DOCKER_AUTH_CONFIG` with a value of:\n\n\n```yaml\n\n{\n    \"auths\": {\n        \"https://gitlab.com:443\": { # if you are using $CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX, you should explicitely include the port here.\n            \"auth\": \"(Base64 of username:password)\"\n        }\n    }\n}\n\n```\n\n\nYou will need to calculate the base64 value of your credentials. 
You can do\nthis from the command line:\n\n\n```shell\n\n# The use of \"-n\" - prevents encoding a newline in the password.\n\necho -n \"my_username:my_password\" | base64\n\n\n# Example output to copy\n\nbXlfdXNlcm5hbWU6bXlfcGFzc3dvcmQ==\n\n\n# A personal access token also works!\n\necho -n \"my_username:personal_access_token\" | base64\n\n```\n\n\nOnce you have the custom environment variable defined, you can use the\nDependency Proxy without having to manually log in within your CI script:\n\n\n```yaml\n\n# This is a working script that would publish an NPM package to the GitLab\npackage registry\n\n# if a properly formatted package.json file exists in the project root.\n\nimage: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/node:latest\n\n\nstages:\n  - deploy\n\ndeploy:\n  stage: deploy\n  script:\n    - echo \"//gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/npm/:_authToken=${CI_JOB_TOKEN}\">.npmrc\n    - npm publish\n```\n\n\n## Support when Docker Hub is offline\n\n\nBy caching all files that make up an image, we also now have the ability to\nkeep pipelines green even if Docker Hub experiences an outage. As long as\nthe Dependency Proxy has the image you are using cached, when it makes the\nHEAD request to check if the cached image is stale or not, if the HEAD\nrequest fails, we will just fall back to the cached image.\n\n\nThanks for reading! If you haven't used the Dependency Proxy yet, [get\nstarted using it\ntoday](https://docs.gitlab.com/ee/user/packages/dependency_proxy/)!\n\n\n## Updates\n\n\nSince this was published in December 2020, there have been many additional\nimprovements and changes to the Dependency Proxy. As a result, some of the\nsuggested approaches in this post have been improved or have become\noutdated. 
I suggest looking through [the most recent\ndocumentation](https://docs.gitlab.com/ee/user/packages/dependency_proxy/)\nto learn more.\n",[9,695],{"slug":1941,"featured":6,"template":700},"dependency-proxy-updates","content:en-us:blog:dependency-proxy-updates.yml","Dependency Proxy Updates","en-us/blog/dependency-proxy-updates.yml","en-us/blog/dependency-proxy-updates",{"_path":1947,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1948,"content":1954,"config":1960,"_id":1962,"_type":14,"title":1963,"_source":16,"_file":1964,"_stem":1965,"_extension":19},"/en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"title":1949,"description":1950,"ogTitle":1949,"ogDescription":1950,"noIndex":6,"ogImage":1951,"ogUrl":1952,"ogSiteName":685,"ogType":686,"canonicalUrls":1952,"schema":1953},"Deploy a NodeJS Express app with GitLab's Cloud Run integration","This tutorial will show you how to use NodeJS and Express to deploy an application to Google Cloud. This step-by-step guide will have you up and running in less than 10 minutes with the Cloud Run integration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097892/Blog/Hero%20Images/Blog/Hero%20Images/speedlights_speedlights.png_1750097891963.png","https://about.gitlab.com/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a NodeJS Express app with GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2025-01-13\",\n      }",{"title":1949,"description":1950,"authors":1955,"heroImage":1951,"date":1919,"body":1958,"category":718,"tags":1959},[1956,1957],"Sarah Matthies","Noah Ing","Are you looking to deploy your NodeJS app to Google Cloud with the least\nmaintenance possible? 
This tutorial will show you how to utilize GitLab’s\nGoogle Cloud integration to deploy your NodeJS app in less than 10 minutes.\n\n\nTraditionally, deploying an application often requires assistance from\nproduction or DevOps engineers. This integration now empowers developers to\nhandle deployments independently. Whether you’re a solo developer or part of\na large team, this setup gives everyone the ability to deploy their\napplications efficiently.\n\n\n## Overview\n\n\n- Create a new project in GitLab\n\n- Set up your NodeJS application\n\n- Use the Google Cloud integration to create a Service account\n\n- Use the Google Cloud integration to configure Cloud Run via Merge Request\n\n- Enjoy your newly deployed NodeJS app\n\n- Follow the cleanup guide\n\n\n## Prerequisites\n\n- Owner access on a Google Cloud Platform project\n\n- Working knowledge of JavaScript/TypeScript (not playing favorites here!)\n\n- Working knowledge of GitLab CI\n\n- 10 minutes \n\n\n## Step-by-step guide\n\n\n### 1. Create a new project in GitLab\n\n\nWe decided to call our project `nodejs–express-cloud-run` for simplicity.\n\n\n![Create a new\nproject](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097905106.png)\n\n\n### 2. Upload your NodeJS app or use this example to get started.\n\n\n[Demo](https://gitlab.com/demos/templates/nodejs-cloud-run)\n\n\n**Note:** Make sure to include the `cloud-run` [CI\ntemplate](https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml)\nwithin your project.\n\n\n![cloud-run CI template\ninclude](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097905107.png)\n\n\n### 3. 
Use the Google Cloud integration to create a Service account.\n\n\nNavigate to __Operate > Google Cloud > Create Service account__.\n\n\n![Create Service account\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097905109.png)\n\n\nAlso configure the region you would like the Cloud Run instance deployed to.\n\n\n![Cloud Run instance deployment region\nselection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097905113.png)\n\n\n### 4. Go to the Deployments tab and use the Google Cloud integration to\nconfigure __Cloud Run via Merge Request__.\n\n\n![Deployments - Configuration of Cloud Run via Merge\nRequest](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097905115.png)\n\n\nThis will open a merge request – immediately merge it.\n\n\n![Merge request for\ndeployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097905117.png)\n\n\n__Note:__ `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and\n`GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the\nprevious steps.\n\n\n![Variables\nlisting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097905118.png)\n\n\n### 5. Voila! 
Check your pipeline and you will see you have successfully\ndeployed to Google Cloud Run using GitLab CI.\n\n\n![Successful deployment to Google Cloud\nRun](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097905119.png)\n\n\nClick the Service URL to view your newly deployed Node server.\n\n\n![View newly deployed Node\nserver](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097905120.png)\n\n\nIn addition, you can navigate to __Operate > Environments__ to see a list of\ndeployments for your environments.\n\n\n![Environments view of deployment\nlist](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097905121.png)\n\n\nBy clicking on the environment called `main`, you’ll be able to view a\ncomplete list of deployments specific to that environment.\n\n\n![Main view of deployments to specific\nenvironment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097905122.png)\n\n\n### 6. Next steps\n\n\nTo get started with developing your Node application, try adding another\nendpoint. For instance, in your `index.js` file, you can add a **/bye**\nendpoint as shown below:\n\n\n```\n\napp.get('/bye', (req, res) => {\n  res.send(`Have a great day! See you!`);\n});\n\n\n```\n\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy\nthe updates. 
Once it’s complete, go back to the Service URL and navigate to\nthe **/bye** endpoint to see the new functionality in action.\n\n\n![Bye\nmessage](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097905123.png)\n\n\n## Follow the cleanup guide\n\n\nTo prevent incurring charges on your Google Cloud account for the resources\nused in this tutorial, you can either delete the specific resources or\ndelete the entire Google Cloud project. For detailed instructions, refer to\nthe [cleanup guide\nhere](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n\n> Read more of these helpful [tutorials from GitLab solutions\narchitects](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[9,939,232,1127,917],{"slug":1961,"featured":91,"template":700},"deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","content:en-us:blog:deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","Deploy A Nodejs Express App With Gitlabs Cloud Run Integration","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"_path":1967,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1968,"content":1974,"config":1980,"_id":1982,"_type":14,"title":1983,"_source":16,"_file":1984,"_stem":1985,"_extension":19},"/en-us/blog/developer-intro-sast-dast",{"title":1969,"description":1970,"ogTitle":1969,"ogDescription":1970,"noIndex":6,"ogImage":1971,"ogUrl":1972,"ogSiteName":685,"ogType":686,"canonicalUrls":1972,"schema":1973},"SAST & DAST: Key security tests for development workflows","Bolster your code quality with static and dynamic application security testing. 
Learn why you need SAST and DAST for your projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680714/Blog/Hero%20Images/intro-developer-sast-dast.jpg","https://about.gitlab.com/blog/developer-intro-sast-dast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why you need static and dynamic application security testing in your development workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-08-12\",\n      }",{"title":1975,"description":1970,"authors":1976,"heroImage":1971,"date":1977,"body":1978,"category":1040,"tags":1979},"Why you need static and dynamic application security testing in your development workflows",[1895],"2019-08-12","\n\nDevOps is a quickly growing practice for companies in almost every market. With\nthe influx of cyber attacks over the past decade, security has slowly crept\nforward in the SDLC to the point where we’re now hearing the term [DevSecOps](/blog/announcing-gitlab-devsecops/) in developer circles.\n\nTo keep things tidy and help developers manage additional security\nresponsibilities, tools for static and dynamic [application security](/topics/devsecops/) testing\n(SAST and DAST) have made their way into the fray. In this post, we’ll\nexplain what SAST and DAST are, how they fit into developers’ workflows, and\nwhen they should be used.\n\n## What is application security testing (AST)?\n\nApplication security testing (AST) refers to the process of testing code to make sure it is free of vulnerabilities. There are many ways to test code, though static application security testing (SAST) and dynamic application security testing (DAST) are two of the more well-known options. 
\n\nApplication security testing has traditionally been a manual (and time-consuming) process, but the growing popularity of DevOps and the risk of insecure code have driven the majority of development teams to automate at least some of the processes. These days, most organizations use a variety of security testing tools to complete AST.\n \n## What are SAST and DAST?\n\nWhat are SAST and DAST? As previously mentioned, under the AST umbrella, there live two different security testing approaches: SAST and DAST. Though different, neither is better than the other and the security \ntesting outcome is superior when both are used together to detect security vulnerabilities in web applications and source code. SAST is a security testing approach that is performed on the application's code, while DAST is an approach that is performed on the running application. Both SAST and DAST are \nessential components of a comprehensive security testing strategy for software applications.\n\nIn summary, SAST and DAST help to ensure that computer systems are both safe and secure. These security measures help make sure that information is protected from hackers and other people who may try to steal it. They are critical tools for successful DevSecOps. Each runs a set\nof automated tests, and both introduce security at the beginning of the\nsoftware development lifecycle.\n\n### Static application security testing (SAST)\n\n[SAST](https://docs.gitlab.com/ee/user/application_security/sast/) can\nbe used to analyze source code for known vulnerabilities – and is also a type\nof white box testing. 
The test will run before your code is deployed, ensuring\nthat developers are alerted to fixes during the development phase.\nSAST can help remediate situations where your code has a potentially dangerous\nattribute in a class or unsafe code that can lead to unintended code execution.\n\n![An example of a SAST summary within a GitLab merge request](https://about.gitlab.com/images/secure/sast.png){: .shadow.medium.center}\n\nWithin GitLab, SAST will automatically generate a summary of fixes and unresolved\nvulnerabilities following every code commit, but before your code is merged to the target\nbranch. Tools that allow SAST reports to sit within the developer’s work\ninterface enable ease of remediation and streamline testing procedures within\nthe development phase.\n\nSAST takes an inside-looking-out approach, looking for security problems that might have been missed during source code development. It is effective when used after development is complete but before the finished project (and any missed security vulnerabilities) is deployed. Lots of developers nowadays integrate SAST testing into their CI/CD pipelines.\n\n### Dynamic application security testing (DAST)\n\n[DAST](https://docs.gitlab.com/ee/user/application_security/dast/), a\ntype of black box testing, analyzes your running web applications or known\nruntime vulnerabilities. GitLab’s DAST tool runs live attacks on a review app\nduring QA, meaning developers can iterate on new apps and updates earlier and\nfaster.\n\nAs with SAST, DAST should auto-run so that the developer doesn’t have to take measures to initiate the test. In other situations, DAST can also be used to\ncontinuously monitor live web applications for issues like cross-site scripting\nor broken authentication flaws. Test results should inform developers of\npotential vulnerabilities and serve as a catalyst for ongoing updates.\n\nDAST tools help you see your web application through the eyes of a hacker in a deployed environment. 
It constantly scans for security vulnerabilities during web application runtime, as well as checking the other API or web services that your application connects to. This makes DAST excellent for testing your complete IT environment where your application or web services run.\n\n## Test early and often using SAST and DAST\n\nStatic and dynamic application security testing are two helpful tools to keep\nyour code secure, but don’t rely on them to handle all of your security needs.\nIt’s still important to do manual code reviews, test high-level behaviors and\nfunctionality, conduct database scanning, and ensure that your whole team is\noperating with a security-first mindset.\n\nCover image by [Mikael Kristenson](https://unsplash.com/@mikael_k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[874,697,9],{"slug":1981,"featured":6,"template":700},"developer-intro-sast-dast","content:en-us:blog:developer-intro-sast-dast.yml","Developer Intro Sast Dast","en-us/blog/developer-intro-sast-dast.yml","en-us/blog/developer-intro-sast-dast",{"_path":1987,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1988,"content":1994,"config":1999,"_id":2001,"_type":14,"title":2002,"_source":16,"_file":2003,"_stem":2004,"_extension":19},"/en-us/blog/developers-write-secure-code-gitlab",{"title":1989,"description":1990,"ogTitle":1989,"ogDescription":1990,"noIndex":6,"ogImage":1991,"ogUrl":1992,"ogSiteName":685,"ogType":686,"canonicalUrls":1992,"schema":1993},"4 Ways developers can write secure code with GitLab","GitLab Secure is not just for your security team – it’s for developers too. 
Learn four ways to write secure code with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666895/Blog/Hero%20Images/developers-write-secure.jpg","https://about.gitlab.com/blog/developers-write-secure-code-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Ways developers can write secure code with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-09-03\",\n      }",{"title":1989,"description":1990,"authors":1995,"heroImage":1991,"date":1996,"body":1997,"category":1040,"tags":1998},[1895],"2019-09-03","\nWriting secure code is a standard part of day-to-day development work, but\nsecurity often appears to be a roadblock instead of a critical piece of the\npuzzle. To make security efforts easier, [GitLab Secure](/stages-devops-lifecycle/secure/)\noffers a number of different tools that help developers identify and remediate vulnerabilities\nwithin their code, _as they’re writing it_. Our goal is to seamlessly integrate\nsecurity into your code writing practices so you’re better able to protect\nyour business from growing cybersecurity threats.\n\n## Testing\n\nThere are a variety of testing tools available to developers within GitLab.\nGenerally, they alert developers to vulnerabilities within their code and report\nthem within the merge request so developers can adjust their code as they\ngo. 
In addition to the testing methods outlined below, developers can also [use\nother tools outside of GitLab](https://handbook.gitlab.com/handbook/product/gitlab-the-product/#plays-well-with-others) by integrating\nthe results of your scanners with our merge request security reports.\n\n### Static application security testing\n\nOur [static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/index.html)\n(SAST) tool scans the application source code\nand binaries to spot potential vulnerabilities before deployment. It uses open\nsource tools that are installed as part of GitLab. Vulnerabilities are shown\nin-line with every merge request and results are collected and presented as a\nsingle report.\n\n### Secret detection\n\n[Secret detection](https://docs.gitlab.com/ee/user/application_security/sast/#secret-detection)\nwithin GitLab is able to detect secrets and credentials that\nhave been unintentionally pushed to the repository. This check is performed by\na specific analyzer during the SAST job, runs regardless of the programming\nlanguage of your app, and displays results within the SAST report.\n\n### Dynamic application security testing\n\nOur [DAST tool](https://docs.gitlab.com/ee/user/application_security/dast/index.html)\nanalyzes your web application for known runtime\nvulnerabilities. It conducts live attacks against a review app and can be created for every\nmerge request as part of GitLab’s [CI/CD capabilities](/topics/ci-cd/). Users can provide HTTP\ncredentials to test private areas. Vulnerabilities are shown in-line with every\nmerge request.\n\n### Dependency scanning\n\n[Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html)\nanalyzes external dependencies (e.g. libraries like Ruby gems) for known\nvulnerabilities on each code commit with GitLab CI/CD. 
This scan relies on open\nsource tools and on the integration with [Gemnasium](https://docs.gitlab.com/ee/user/project/import/index.html)\ntechnology (now part of\nGitLab) to show, in-line with every merge request, vulnerable dependencies\nin need of updating. Results are collected and available as a single report.\nDependency scanning also provides a list of your project’s dependencies with\ndifferent versions for languages and package managers supported by Gemnasium.\n\n### Container scanning\n\nIf you’re using GitLab CI/CD, [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html)\nwill let you check Docker images (and containers) for\nknown vulnerabilities in the application environment. Analyze image contents\nagainst public vulnerability databases using the open source tool, [Clair](https://coreos.com/clair/docs/latest/),\nthat\nis able to scan any kind of Docker (or app) image. Vulnerabilities are shown\nin-line with every merge request.\n\n### License management\n\nUpon code commit, project dependencies are reviewed for [approved and blacklisted\nlicenses](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\ndefined by custom policies per project. Software licenses are\nidentified if they are not within policy, and new licenses are also listed if\nthey require a status designation. This scan relies on an open source tool,\nLicenseFinder, and license analysis results are shown in-line for every merge\nrequest for immediate resolution.\n\n### Code quality analysis\n\nWith the help of GitLab CI/CD, you can analyze your source code quality using\nGitLab [Code Quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html).\nCode Quality uses [Code Climate Engines](https://codeclimate.com/)\nand runs in pipelines using a Docker image built into the Code Quality\nproject. 
Once the\nCode Quality job has completed, GitLab checks the generated report, compares the\nmetrics between the source and target branches, and shows the information\nwithin the merge request. With pipelines that enable concurrent testing and\nparallel execution, teams quickly receive insight about every commit, allowing\nthem to deliver higher quality code faster.\n\n### The Security Dashboard\n\nSecurity dashboards in GitLab exist at both the project and group level. The\ngroup dashboard provides an overview of all the security vulnerabilities in your\ngroups and projects. In the dashboard, developers are able to drill down into a\nvulnerability for further details, see which project it comes from and the file\nit’s in, and view various metadata to help analyze the risk.\n\nThe dashboard also allows viewers to\n[interact with vulnerabilities](https://docs.gitlab.com/ee/user/application_security/index.html#interacting-with-the-vulnerabilities)\nby creating an issue for them or dismissing them. For ease of use, vulnerabilities\nwithin the group Security Dashboard can be filtered by severity, confidence, report type, and project.\n\nIn addition to the vulnerability overview, the group Security Dashboard also\nprovides a timeline that displays how many open vulnerabilities your projects\nhad at various points in time. While security scans are automatically run for\neach code update, you’ll have some default branches that are infrequently\nupdated. 
To keep your Security Dashboard up to date on those branches, you can\nuse GitLab to [configure a scheduled pipeline](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\nto run a daily security scan.\n\n## What’s next for GitLab Secure?\n\nWhile we already have a number of ways to help you write secure code and build\nsecure products and services, we’re always looking for ways to give you more.\nHere are a few of the things we’re working on:\n\n### Interactive application security testing\n\nInteractive application security testing (IAST) checks the runtime behavior of applications by\ninstrumenting the code and\nchecking for error conditions. It is composed by an agent that lives inside the\napplication environment, and an external component, like DAST, that can interact\nand trigger unintended results.\n\n### Fuzzing\n\n[Fuzzing](/direction/secure/dynamic-analysis/fuzz-testing/)\nis a testing technique focused on finding flaws and vulnerabilities in\napplications by sending arbitrary payloads instead of valid input. The idea is to\ntrigger exceptions and unintended code paths that may lead to crashes and\nunauthorized operations. Once a possible problem – like a crash – is found,\nattackers can attempt to find the exact conditions needed to trigger the bug\nand see if they can be fine-tuned to obtain a useful result. (It is worth noting\nthat fuzzing is primarily intended for security teams because it requires more\ntime to execute. While fuzzing is a useful testing method, it should not be a\ndevelopment blocker).\n\n### Vulnerability database\n\nGitLab integrates access to proprietary and open source application security\nscanning tools. In order to maintain the efficacy of those scans, we strive to\nkeep their underlying vulnerability databases up to date.\n\n### Auto remediation\n\nVulnerabilities that require manual intervention to create a fix and push it to\nproduction have a time window where attackers have the ability to leverage the\nvulnerability. 
Auto remediation aims to automate the vulnerability solution flow and\nautomatically create a fix. The fix is then tested, and if it passes all the\ntests already defined for the application, it is deployed to production.\n\nPhoto by [Daniel McCullough](https://unsplash.com/@d_mccullough?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com/search/photos/write?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[697,721,9,1489,874],{"slug":2000,"featured":6,"template":700},"developers-write-secure-code-gitlab","content:en-us:blog:developers-write-secure-code-gitlab.yml","Developers Write Secure Code Gitlab","en-us/blog/developers-write-secure-code-gitlab.yml","en-us/blog/developers-write-secure-code-gitlab",{"_path":2006,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2007,"content":2013,"config":2019,"_id":2021,"_type":14,"title":2022,"_source":16,"_file":2023,"_stem":2024,"_extension":19},"/en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm",{"title":2008,"description":2009,"ogTitle":2008,"ogDescription":2009,"noIndex":6,"ogImage":2010,"ogUrl":2011,"ogSiteName":685,"ogType":686,"canonicalUrls":2011,"schema":2012},"DevOps on the edge: Upcoming collaborations between GitLab and Arm","Check out the latest news from the technical evangelist team about upcoming initiatives from GitLab and Arm.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682923/Blog/Hero%20Images/gitlab-arm-collaboration.jpg","https://about.gitlab.com/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps on the edge: Upcoming collaborations between GitLab and Arm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-10-08\",\n      
}",{"title":2008,"description":2009,"authors":2014,"heroImage":2010,"date":2016,"body":2017,"category":783,"tags":2018},[2015],"Priyanka Sharma","2019-10-08","\nDevOps has moved from being a trend to an established cornerstone of the software development and delivery lifecycle. Today, the best practices of DevOps are being applied, in new and unique ways, to edge computing. As a board member of the Cloud Native Computing Foundation, I participate in open source communities regularly and over the years, I have collaborated with various folks from Arm because today where there is the edge, there is Arm.\n\nAs the technical evangelism leader at GitLab, I got involved with folks from the Arm project when collaborating on [CNCF.ci](http://cncf.ci). GitLab is a complete [DevOps platform](/solutions/devops-platform/), delivered as a single application. A key component of our product is our CI/CD pipeline that is well loved and used in the industry. Arm, through its market leadership in the mobile and embedded space, is now expanding into infrastructure space for edge-to-cloud applications. 
There is tremendous potential to grow within this emerging space and offer software developers a frictionless environment to develop innovative software at a rapid pace, securely.\nArm is having their annual conference [Arm TechCon 2019](https://www.armtechcon.com/) this week in San Jose, California, and I thought this is a great opportunity to highlight key projects and activities happening within the ecosystem involving Arm and GitLab:\n\n### GitLab for edge base research projects\n\nEric Van Hensbergen, R&D fellow from Arm's Research team, has been leading an effort to [use GitLab for edge base research projects](https://community.arm.com/developer/research/b/articles/posts/continuous-cross-architecture-integration-with-gitlab) creating multi-architecture images using Docker containers, including running GitLab’s 64-bit Runner on Arm instances on public cloud providers such as Packet Cloud and AWS. You can [access the runner](https://packages.gitlab.com/runner/gitlab-runner) for yourself too!\n\n### Stream processing on the edge\n\nLast month at [GitLab Commit Brooklyn](/blog/wrapping-up-commit/), GitLab’s first ever user conference, Eduardo Silva, principal engineer from Arm Treasure Data, [delivered a talk on the benefits of stream processing on the edge](https://gitlabcommit2019brooklyn.sched.com/event/TPDd/picking-up-speed-logging-stream-processing) in distributed systems using [Fluent Bit](https://fluentbit.io/) (a [Fluentd](https://www.fluentd.org/) open source sub-project).\n\n### Join the CNCF CI Working Group Monthly Meeting\n\nToday, all projects on [CNCF.CI](https://cncf.ci/) are being built and tested on both x86 and Arm architecture inside a Kubernetes test environment hosted on Packet’s bare metal infrastructure. For anyone interested, the working group hosts open meetings every month. More details are available in their [Monthly Meeting doc](https://docs.google.com/document/d/1NA4N6PvNEkHX1yzaDFr19Xlru-amRxNi2pliqudmYNA/edit). 
It’s a great group and I recommend people attend.\n\nThere are a lot of exciting activities happening in the edge-to-cloud and DevOps space. As a developer evangelist, I know the value Arm brings to the ecosystem and am excited to see the commencement of the GitLab and Arm partnership. More announcements to come in the near future. Stay tuned!",[9,830,232,268],{"slug":2020,"featured":6,"template":700},"devops-on-the-edge-a-conversation-about-gitlab-and-arm","content:en-us:blog:devops-on-the-edge-a-conversation-about-gitlab-and-arm.yml","Devops On The Edge A Conversation About Gitlab And Arm","en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm.yml","en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm",{"_path":2026,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2027,"content":2033,"config":2039,"_id":2041,"_type":14,"title":2042,"_source":16,"_file":2043,"_stem":2044,"_extension":19},"/en-us/blog/devops-tool-landscape",{"title":2028,"description":2029,"ogTitle":2028,"ogDescription":2029,"noIndex":6,"ogImage":2030,"ogUrl":2031,"ogSiteName":685,"ogType":686,"canonicalUrls":2031,"schema":2032},"The DevOps tool landscape","Competitive intelligence manager Mahesh Kumar describes the criteria we use when comparing GitLab to other DevOps tools.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670008/Blog/Hero%20Images/devops-tool-landscape.jpg","https://about.gitlab.com/blog/devops-tool-landscape","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The DevOps tool landscape\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mahesh Kumar\"},{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-01\",\n      }",{"title":2028,"description":2029,"authors":2034,"heroImage":2030,"date":2036,"body":2037,"category":1040,"tags":2038},[2035,715],"Mahesh Kumar","2019-11-01","\nOne of the [core 
values](https://handbook.gitlab.com/handbook/values/) at GitLab is transparency, and it is in this spirit that we evaluate and articulate how GitLab fits into the competitive landscape. One of the ways we’ve demonstrated this transparency is by [listing other DevOps tools](/competition/) on our website and how they compare to functionality in GitLab. This approach is a little unorthodox but we believe this transparency not only helps teams make the right decisions, it also helps us identify where we can improve our product.\n\nFor any competitive comparison to be effective, it has to be fair, accurate, and easy to understand. Whether we’re comparing [three versions of Jenkins](/blog/jenkins-one-year-later/) to GitLab CI/CD, or comparing other [DevOps tools](/topics/devops/devops-tools-explained/) in the SDLC, we try to ensure these three key objectives of competitive comparisons are achieved.\n\n## Staying fair\n\nOne of the biggest challenges in competitive comparisons is staying fair and credible. The selection of competitive comparison criteria plays a significant role because it has to be comprehensive and not self-serving. Far too often vendors restrict competitive comparison criteria to what their product does well and avoid the gaps that might be in their products. At GitLab, we make a concerted effort to avoid this pitfall, and our culture of transparency keeps us honest in our assessment of where we excel and where we can do better.\n\nThe [GitLab Maturity Framework](/direction/maturity/) articulates the stages, categories, and features that constitute the end-to-end DevOps lifecycle. The maturity framework shows where GitLab provides an elevated user experience and also outlines our planned roadmap for the future. 
Since this framework takes a long-term view of criteria/features that constitute various DevOps stages and categories, we use this framework as a guide for our competitive comparisons.\n\nIn our GitLab Maturity Framework, we have a few categories where we rank as one of the best-in-class, both with industry analysts and GitLab users: Source code management, code review, and continuous integration (CI). To see one of these comparisons, check out our Jenkins CI page where we outline features, pricing, and a comprehensive overview.\n\n[Jenkins vs. GitLab](/solutions/jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n\n## Keeping it accurate\n\nHaving settled on criteria for evaluation, getting the data accurate is a major challenge. We have a structured information gathering process as laid out below:\n\n    1. Website\n    2. Documentation\n    3. Demos\n    4. Product install and usage\n    5. Customer feedback\n\nSometimes we are unable to complete this process for all vendor products for several reasons. First is the lack of available information either on a vendor's website or documentation. Second, we may be unable to access their product to validate certain capabilities. Some vendors do not provide a free or easily accessible version of the product, while others may explicitly prohibit the use of their product for comparison purposes. In either case, we restrict our comparison to publicly available details.\n\nThe second challenge in ensuring accuracy is that vendors don't always put out new releases and capabilities on a constant basis and our analysis may be slightly outdated. One of the best examples of this is, “when does one stop [painting the Golden Gate Bridge](http://goldengatebridge.org/research/facts.php#PaintHowOften)?” The answer is never! It’s an ongoing process that requires continuous paint touch-ups from one end to the other.\n\n## Everyone can contribute\n\nOur open source DNA extends to how we manage the tools landscape pages. 
We freely solicit input internally from multiple teams within GitLab and more importantly from other vendors’ teams. Anyone, including other vendors, can use GitLab to create an issue stating the change they wish to see or information they would like to correct. This issue is then assigned to the appropriate GitLab team to address. In fact, one Product Manager from a vendor recently contacted us about a change to their comparison page, and we gladly made that change.\n\nBy providing an opportunity to comment and give feedback, we hope to foster a dialog with those better informed about different products, thereby improving the tools landscape pages with rich and accurate information.\n\n## Easy to understand\n\nThe final challenge in comparison pages is to make them easy to interpret. We do this in two different ways: First, all the feature-level comparison is listed in the comparison page. For those interested in a particular feature or capability, they can easily scan the page to find the feature they’re looking for.\n\nSometimes the feature details need explanation, or perhaps there’s a feature that doesn’t quite fit into the “yes or no” mold. For that reason, we also provide a top-down analysis at the start of most comparison pages that provides a summary of features and provides additional context. This sometimes means a critical feature can get lost in the text, but we are doing our best to keep consistency across vendors and identify discrepancies quickly.\n\nThere are a lot of DevOps tools out there. As a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, GitLab can remove the pain of having to choose, integrate, learn, and maintain the multitude of tools necessary for a successful DevOps toolchain. 
If a DevOps tool is missing, feel free to [email us](mailto:incoming+gitlab-com-marketing-product-marketing-7424125-issue-@incoming.gitlab.com?subject=DevOps%20tool%20request&amp;amp;bcc=devopstools%40gitlab.com&amp;amp;body=-%20Tool%20name%3A%0D%0A-%20Stages%3A%0D%0A-%20Change%3A%0D%0A%0D%0A%0D%0APlease%20leave%20these%20label%20flags.%20%20%20%20%0D%0A%2Flabel%20~comparison%20~Servicedesk) or [create an issue](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#create-a-new-issue) and we’ll be happy to add a feature comparison for that product.\n\nCover image by [Troy Nikolic](https://unsplash.com/@troynikolic?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,721,1064],{"slug":2040,"featured":6,"template":700},"devops-tool-landscape","content:en-us:blog:devops-tool-landscape.yml","Devops Tool Landscape","en-us/blog/devops-tool-landscape.yml","en-us/blog/devops-tool-landscape",{"_path":2046,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2047,"content":2053,"config":2058,"_id":2060,"_type":14,"title":2061,"_source":16,"_file":2062,"_stem":2063,"_extension":19},"/en-us/blog/devsecops-survey-released",{"title":2048,"description":2049,"ogTitle":2048,"ogDescription":2049,"noIndex":6,"ogImage":2050,"ogUrl":2051,"ogSiteName":685,"ogType":686,"canonicalUrls":2051,"schema":2052},"Our 2020 DevSecOps Survey found faster releases and changing roles","Nearly 3700 software pros shared their DevOps successes, failures and thoughts on the future. 
Here’s what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663975/Blog/Hero%20Images/devsecopssurvey.png","https://about.gitlab.com/blog/devsecops-survey-released","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Our 2020 DevSecOps Survey found faster releases and changing roles\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-05-18\",\n      }",{"title":2048,"description":2049,"authors":2054,"heroImage":2050,"date":2055,"body":2056,"category":1062,"tags":2057},[1037],"2020-05-18","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nIn February 2020, nearly 3700 DevOps practitioners from 21 countries shared, often in their own words, the reality of their software development journeys. They told us DevOps works for them: Nearly 83% said they’re releasing code faster and about 60% are deploying code either multiple times a day, daily, or every other day. But they also offered details of a less obvious but perhaps more important shift – their roles are changing, in some cases dramatically, because of DevOps.\n\nAlthough this survey was completed before today’s unprecedented economic upheaval, we think the insights in our [2020 Global DevSecOps Survey](/developer-survey/) may help you get a greater understanding of real world DevOps and the way job responsibilities are changing for developers, security pros, operations team members, and testers.\n\n## Dev + Ops\n\nWhy are developers releasing code more quickly with DevOps? 
For starters, they’re adding some of the key DevOps components including CI, SCM, automated testing, and CD.\n\n_\"Pre-deployment tests have provided more confidence that the product is ready to be released, also delivery frequency has increased.\"_\n\nBut the technology changes only tell part of the story. Traditional operations-type duties like provisioning or maintaining environments are increasingly part of development responsibilities. Over 34% of developers say they define and/or create the infrastructure their app runs on.\n\n_\"Deployment has become a non-task. Bootstrapping new projects is 10x faster because of the reusable infrastructure.\"_\n\nDevelopers say they’re no longer doing lots of hands-on tasks – like manual testing, deployments or merging – but they are increasingly responsible for security. In fact 28% say they’re now solely responsible for security in their organizations, a clear sign that security is beginning to \"shift left\" in a material way.\n\n_\"Security varies project to project. DevOps is usually tasked with 'protecting' our environments. We devs try to follow industry standards code-wise.\"_\n\n## An uneasy alliance\n\nAlthough security remains a work in progress at many if not most organizations, there are a few signs that [DevSecOps](/solutions/security-compliance/) is actually happening. Security professionals report that they are (finally) part of cross-functional teams and are working more closely with developers than ever before.\n\n_\"(Security) is becoming less focused into silo positions and more of a jack of all trades role.\"_\n\nIn fact 65% of security teams say their organizations have \"shifted left\" though, when we drilled down to find out what that actually meant, the details became much less clear. 
Fewer than 19% put SAST scan results into a pipeline report a developer can access and dynamic application security testing (DAST) fares even worse – less than 14% of companies give developers access to those reports.\n\nAt the same time, security teams continue to report that developers don't find enough bugs early enough in the process and/or that they’re reluctant to fix them when they are discovered.\n\nTo add to the confusion, 33% of security pros say they’re solely responsible for security in their organizations. But nearly the same percentage – 29% – say *everyone* is responsible. The ideal, of course, is what was shared by one survey taker:\n\n_\"We don’t have separate security, developers and operations; we are DevSecOps (and more).\"_\n\n## In the clouds\n\nOperations is often the place where the proverbial rubber hits the road and that’s particularly true with DevOps. In fact over 60% of operations team members report their roles are changing thanks to DevOps.\n\nWhat do these new roles look like?\n\n_\"Ops is 60% new project work and 40% operations/fire-fighting/developer support.\"_\n\n_\"We ensure reliability and availability, improve developer efficiency, automation, tools, and observability.\"_\n\n_\"We keep the lights on.\"_\n\n_\"(Ops today is) anything between dev and ops. From planning to deployment but not monitoring and maintaining apps in production.\"_\n\nToday 42% of operations team members see their role as primarily managing hardware and infrastructure, while 52% say their first priority is managing cloud services.\n\n## The trouble with test\n\nFor the second year in a row our survey takers have pointed squarely to testing as the number one reason releases are delayed. Last year 49% said test was at fault; this year it was 47%.\n\nBut there are small signs of change. Almost three-quarters of organizations report they have shifted testing left, meaning they’ve moved it earlier into the development process. What does that actually mean? 
Approximately 31% said developers test some of their code and 25% said automated testing happens as code is being written. About 17% said dev and test work as a team to test \"as close to real time as possible,\" and about 9% said they practice test-driven development (TDD).\n\n_\"We do TDD. QA and dev act as a team. We have automated tests running parallel with developing code.\"_\n\nLike security, testers say they are now much more involved in the development process. Nearly 30% said they’re working more closely with developers, and 16% said they have \"a more visible seat at the table.\" And just over 15% said that thanks to DevOps, they’re much more likely to be able to \"test what matters.\"\n\n_\"We have to write less paper and tickets and have faster reaction times.\"_\n\n_\"We’re all the same – dev team is the ops team.\"_\n\n_\"We’re starting to see light at the end of the tunnel.\"_\n\n## Looking forward\n\nOur respondents had a big list of areas they hope to focus on for the future from automation to CI/CD and even going more deeply into DevOps. DevOps and lifelong learning clearly go hand in hand.\nBut let’s end on a high note. We asked developers how prepared they are for the future: 71% said prepared or very prepared, while less than 25% said \"not very prepared.\" But we like this comment left from one developer, who has the lifelong learning baked in:\n\n_\"I’m only prepared because I constantly keep tinkering on the side.\"_\n\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals. 
You can also compare it with [previous year surveys](/developer-survey/previous/)_\n",[721,1269,9],{"slug":2059,"featured":6,"template":700},"devsecops-survey-released","content:en-us:blog:devsecops-survey-released.yml","Devsecops Survey Released","en-us/blog/devsecops-survey-released.yml","en-us/blog/devsecops-survey-released",{"_path":2065,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2066,"content":2072,"config":2077,"_id":2079,"_type":14,"title":2080,"_source":16,"_file":2081,"_stem":2082,"_extension":19},"/en-us/blog/directed-acyclic-graph",{"title":2067,"description":2068,"ogTitle":2067,"ogDescription":2068,"noIndex":6,"ogImage":2069,"ogUrl":2070,"ogSiteName":685,"ogType":686,"canonicalUrls":2070,"schema":2071},"Get faster and more flexible pipelines with a Directed Acyclic Graph","A Directed Acyclic Graph will let you run pipeline steps out of order, break the stage sequencing, and allow jobs to relate to each other directly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681298/Blog/Hero%20Images/james-eades-bfwhP9xodvE-unsplash.jpg","https://about.gitlab.com/blog/directed-acyclic-graph","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get faster and more flexible pipelines with a Directed Acyclic Graph\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-05-12\",\n      }",{"title":2067,"description":2068,"authors":2073,"heroImage":2069,"date":2074,"body":2075,"category":718,"tags":2076},[1835],"2020-05-12","\n\nRecently GitLab released an exciting feature that reduces the pipeline running times and enables more flexibility in the order jobs are running. The feature, Directed Acyclic Graph (DAG), is free and available on GitLab.com and the self-managed versions. 
\n\n### Pipeline Jobs and Stages \n\nIn a typical [CI/CD pipeline](/topics/ci-cd/) you have multiple stages, which represent an automation of the [DevOps process](/topics/devops/) such as build, test, package, config, and deploy. Each stage is made up of one or more jobs. In the [CI/CD configuration file, .gitlab-ci.yml](https://docs.gitlab.com/ee/ci/quick_start/#what-is-gitlab-ciyml) you define the order of your stages. Usually the the pipeline will start with the build jobs; after all build jobs completed, test jobs will start, then jobs from the next stage will run, and so on. \n\nWhile this order makes a lot of sense, in some cases this might slow down the overall execution time. Imagine the build stage consists of task A which completes in 1 min, and task B which is very slow (say 5 mins). Task C is in the test stage but it depends on task A only. Still, task C must wait 5 minutes before it can be executed, resulting in a waste of 4 minutes.  \n\n![stage sequencing ](https://about.gitlab.com/images/blogimages/DAG/pipeline_diagram.png){: .shadow.medium.center}\n\n### Meet Directed Acyclic Graph\n\nDAG will allow you to run pipeline steps out of order, breaking the stage sequencing and allowing jobs to relate to each other directly no matter which stage they belong to. \n\nWith DAG, jobs can start to run immediately after their dependent jobs completed even if some jobs in the previous stage are still running. This new feature speeds up the CI/CD process and helps complete the deployment sooner.  \n\nIn the below example, a project generates both Android, iOS, and web apps in a multi-stage pipeline. The iOS tests started as soon as the iOS build passed rather than waiting for all the Android and web builds to pass too. It was the same for the iOS deployment – it completed after the iOS tests passed without waiting for the other test to complete. The total compute time might be the same, but the wall-clock time is different. 
In more complicated cases, it's possible to significantly reduce the overall wall-clock time of the pipeline by declaring exactly which jobs depend on which other jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/DAG-blog.png){: .shadow.medium.center}\n\n### Defining dependent jobs\n\nThe .gitlab-ci.yml file introduces a new keyword: [needs](https://docs.gitlab.com/ee/ci/yaml/#needs) which gets a parameter on an array of jobs that it depends on. \n\n```\nios:\n  stage: build\n  script:\n    - echo \"build ios...\"\n\n\nios_test:\n  stage: test\n  script:\n    - echo \"test something...\"\n  needs: [\"ios\"]\n```\n\nThe ```ios_test``` job, which is part of the ```test``` stage, will start immediately after the ```ios``` job, which is in the ```build``` stage, and it will complete regardless of the status of other jobs in the ```build``` stage. \n\n### Where is it useful? \n\nThis can be valuable for the increasingly popular [monorepo](https://en.wikipedia.org/wiki/Monorepo) pattern where you have different folders in your repo that can build, test, and maybe even deploy independently, just like in the above example where the iOS, Android and web apps can be built, test and deployed individually. \n\nAnother usage could be when your pipeline contains some heavy tests that take a lot of time to execute. It would make more sense to start those tests as soon as possible, rather than wait for not relevant tasks to complete and only then start them. 
\n\n### You can also watch a demo of DAG  below:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/9EHcQd3x_Sw\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\nCover image by [James Eades](https://unsplash.com/photos/bfwhP9xodvE) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9,999],{"slug":2078,"featured":6,"template":700},"directed-acyclic-graph","content:en-us:blog:directed-acyclic-graph.yml","Directed Acyclic Graph","en-us/blog/directed-acyclic-graph.yml","en-us/blog/directed-acyclic-graph",{"_path":2084,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2085,"content":2091,"config":2098,"_id":2100,"_type":14,"title":2101,"_source":16,"_file":2102,"_stem":2103,"_extension":19},"/en-us/blog/docker-in-docker-with-docker-19-dot-03",{"title":2086,"description":2087,"ogTitle":2086,"ogDescription":2087,"noIndex":6,"ogImage":2088,"ogUrl":2089,"ogSiteName":685,"ogType":686,"canonicalUrls":2089,"schema":2090},"Update: Changes to GitLab CI/CD and Docker in Docker with Docker 19.03","If you are using the Docker in Docker workflow you may need to enable TLS or explicitly turn it off.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/docker-in-docker-with-docker-19-dot-03","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update: Changes to GitLab CI/CD and Docker in Docker with Docker 19.03\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Azzopardi\"}],\n        \"datePublished\": \"2019-07-31\",\n      }",{"title":2086,"description":2087,"authors":2092,"heroImage":2088,"date":2094,"body":2095,"category":1062,"tags":2096},[2093],"Steve Azzopardi","2019-07-31","\n\nLast week Docker released a new 
version,\n[19.03](https://docs.docker.com/engine/release-notes/#19030), which\nbrings a few exciting features with it.\n\nOne of the features affects GitLab CI/CD when using the [Docker in Docker\nworkflow](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker-executor).\nAs of version 19.03, [`docker:dind`](https://hub.docker.com/_/docker)\nwill automatically generate TLS certificates and require using them for\ncommunication. This is from [Docker's official\ndocumentation](https://hub.docker.com/_/docker#tls):\n\n> Starting in 18.09+, the dind variants of this image will automatically generate TLS certificates in the directory specified by the DOCKER_TLS_CERTDIR environment variable.\n> Warning: in 18.09, this behavior is disabled by default (for compatibility). If you use --network=host, shared network namespaces (as in Kubernetes pods), or otherwise have network access to the container (including containers started within the dind instance via their gateway interface), this is a potential security issue (which can lead to access to the host system, for example). It is recommended to enable TLS by setting the variable to an appropriate value (-e DOCKER_TLS_CERTDIR=/certs or similar). In 19.03+, this behavior is enabled by default.\n\nWhen you upgrade to 19.03 (which is done automatically if using\n`docker:dind`) you may start seeing an issue like:\n\n```\ndocker: Cannot connect to the Docker daemon at tcp://docker:2375. Is the docker daemon running?.\n```\n\nTo fix the problem above you have two options:\n\n1. Configure [GitLab Runner](https://docs.gitlab.com/runner/) to use TLS.\n1. Explicitly turn off TLS.\n\nThe shared Runners available on GitLab.com support both workflows, which\nare described in detail below.\n\nYou may notice that we are now also suggesting a specific version such as\n`docker:19.03.0-dind` and not `docker:dind`. 
This is to help prevent users'\njobs randomly failing when a new update comes out.\n\n## Configure TLS\n\nSince the service `docker:dind` will create the certificates, we need to\nhave the certificate shared between the service and the job container.\nTo do this we have to add a mount inside of the\n[volumes](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnersdocker-section)\nunder the `[runners.docker]` section.\n\nFor example:\n\n```toml\n[[runners]]\n  name = \"My Docker Runner\"\n  url = \"http://gitlab.com\"\n  token = \"\"\n  executor = \"docker\"\n  [runners.custom_build_dir]\n  [runners.docker]\n    privileged = true\n    volumes = [\"/certs/client\", \"/cache\"]\n    shm_size = 0\n```\n\nIf you're a GitLab.com user, we've already done the config change above for you on the\nShared Runners.\n\nAlso, update `.gitlab-ci.yml` accordingly to specify the\n`DOCKER_TLS_CERTDIR`\n\n```yml\nimage: docker:19.03.0\n\nvariables:\n  DOCKER_DRIVER: overlay2\n  # Create the certificates inside this directory for both the server\n  # and client. The certificates used by the client will be created in\n  # /certs/client so we only need to share this directory with the\n  # volume mount in `config.toml`.\n  DOCKER_TLS_CERTDIR: \"/certs\"\n\nservices:\n  - docker:19.03.0-dind\n\nbefore_script:\n  - docker info\n\nbuild:\n  stage: build\n  script:\n    - docker build -t my-docker-image .\n    - docker run my-docker-image /script/to/run/tests\n```\n\n## Disable TLS\n\nYou might not have access to update the volume mounting inside of the\n`config.toml`, so the only option is to disable TLS. 
You can do this by\nsetting the environment variable `DOCKER_TLS_CERTDIR` to an empty value.\n\nFor GitLab.com Shared Runners users this is done already using the\n[environment settings](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section),\nwhich works the same way.\n\n```yml\nimage: docker:19.03.0\n\nvariables:\n  DOCKER_DRIVER: overlay2\n  DOCKER_TLS_CERTDIR: \"\"\n\nservices:\n  - docker:19.03.0-dind\n\nbefore_script:\n  - docker info\n\nbuild:\n  stage: build\n  script:\n    - docker build -t my-docker-image .\n    - docker run my-docker-image /script/to/run/tests\n```\n\nWe would like to thank the rest of the community with all the feedback\nand help throughout\n[#4501](https://gitlab.com/gitlab-org/gitlab-runner/issues/4501).\n\n",[2097,9],"releases",{"slug":2099,"featured":6,"template":700},"docker-in-docker-with-docker-19-dot-03","content:en-us:blog:docker-in-docker-with-docker-19-dot-03.yml","Docker In Docker With Docker 19 Dot 03","en-us/blog/docker-in-docker-with-docker-19-dot-03.yml","en-us/blog/docker-in-docker-with-docker-19-dot-03",{"_path":2105,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2106,"content":2112,"config":2118,"_id":2120,"_type":14,"title":2121,"_source":16,"_file":2122,"_stem":2123,"_extension":19},"/en-us/blog/dockerizing-review-apps",{"title":2107,"description":2108,"ogTitle":2107,"ogDescription":2108,"noIndex":6,"ogImage":2109,"ogUrl":2110,"ogSiteName":685,"ogType":686,"canonicalUrls":2110,"schema":2111},"Dockerizing GitLab Review Apps","A GitLab user shows us how to deploy Docker containers as a Review App.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680430/Blog/Hero%20Images/dockerizing-review-apps.jpg","https://about.gitlab.com/blog/dockerizing-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Dockerizing GitLab Review Apps\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Stephan Hochdörfer\"}],\n        \"datePublished\": \"2017-07-11\",\n      }",{"title":2107,"description":2108,"authors":2113,"heroImage":2109,"date":2115,"body":2116,"category":718,"tags":2117},[2114],"Stephan Hochdörfer","2017-07-11","Last year GitLab introduced the [Review\nApps](https://docs.gitlab.com/ee/ci/review_apps/) feature. Review Apps are app\nenvironments that are created dynamically every time you push a new branch\nup to GitLab. As a bonus point the app environments are automatically\ndeleted when the branch is deleted. Since we moved to using Docker for quite\na few of our projects I was keen on figuring out how to combine Docker and\nthe GitLab Review Apps functionality as the documentation only mentions\nNGINX as a way to run Review Apps. As it turns out, it is rather simple to\ndeploy Docker containers as a Review App.\n\n\n\u003C!-- more -->\n\n\nIn our scenario the GitLab Runner for building the Docker image and the\nGitLab Runner for \"running\" the Review Apps make use of the shell executor,\nthat way we do not have to deal with Docker-in-Docker issues. Besides\ninstalling the gitlab-ci-multi-runner package we also installed Docker and\ndocker-compose.\n\n\nFirst of all, we define two build stages in the .gitlab-ci.yml file – the\nbuild and deploy stage:\n\n\n```html\n\nstages:\n  - build\n  - deploy\n  ```\n\nThe build stage is defined like this:\n\n```html\n\nbuild:\n  tags:\n    - php7\n  stage: build\n  script:\n    - echo \"Building the app\"\n    - composer.phar install\n    - docker build -t myproject/myapp .\n    - docker tag myproject/myapp:latest \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker push registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  only:\n  - master\n  ```\n\nThis will create the Docker image and push it to our Sonatype Nexus instance\nwhich serves as a private Docker registry for us. 
As you can see I make use\nof the $CI_COMMIT_REF_NAME variable when tagging the Docker image. That way,\nwe end up with a Docker image per branch. Downside: you cannot use\ncharacters in the branch name which are no valid Docker version identifiers.\nI still need to figure out a fix for this.\n\n\nThe deploy stage consists of two jobs: one for deploying the container, the\nother for undeploying the container:\n\n\n```html\n\ndeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Deploy to dev.loc\"\n    - docker pull registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker run -d -P -l traefik.enable=true \\\n      -l traefik.frontend.rule=Host:reviewapp.dev.loc \\\n      -l traefik.protocol=http --name reviewapp-demo-$CI_COMMIT_REF_NAME \\\n      registry.loc/myproject/myapp:$CI_COMMIT_REF_NAME\n  environment:\n    name: dev\n    url: http://reviewapp.dev.loc\n  only:\n  - master\n  ```\n\nWhen this code is run it will simply pull the latest image from the private\nDocker registry and run it. Since the gitlab-runner user will push the image\nto the registry the user needs an account there and needs to be\nauthenticated against the registry. I could not find a way how to configure\nthe registry credentials via the .gitlab.yml file, so I ssh'ed into the\nboxes and manually run a \"docker login registry.loc\" for the gitlab-runner\nuser. Currently we do not have many servers - virtual machines in our case -\nso that approach is fine, but does not scale in the future.\n\n\nWhen running the container we set a fixed name for the container. That way,\nwe can easily stop it when it comes to the undeploy job. We also define some\nTraefik labels as we use Traefik in front of the docker daemon to route the\nrequests. Traefik itself runs in a container as well. 
The Traefik container\nis launched like this:\n\n\n```html\n\ndocker run -d --restart=always -p 8080:8080 -p 80:80 -p 443:443 \\\n\n-l traefik.enable=false --name=traefik \\\n\n-v /var/run/docker.sock:/var/run/docker.sock \\\n\n-v /etc/traefik/traefik.toml:/etc/traefik/traefik.toml \\\n\n-v /etc/traefik/ssl/cert.key:/etc/traefik/ssl/cert.key \\\n\n-v /etc/traefik/ssl/cert.pem:/etc/traefik/ssl/cert.pem \\\n\ntraefik\n\n```\n\n\nWe do not use any fancy Traefik configuration, just the defaults for the\ndocker backend. Since the Review Apps server runs in our intranet and uses\nour intranet domain name we were not able to use the Let's Encrypt support\nbuilt in Traefik. Instead, we were required to generate a self-signed SSL\ncertificate and mount that in the Traefik container.\n\n\nThe undeploy job is the final piece of the puzzle. GitLab allows you to\nmanually stop Review Apps by clicking a Pause button the GitLab UI. To\nundeploy a Review App we simply stop and remove the container by the defined\nname.\n\n\n```html\n\nundeploy_dev:\n  tags:\n    - dev\n  stage: deploy\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - echo \"Remove review app from dev.loc\"\n    - docker stop reviewapp-demo-$CI_COMMIT_REF_NAME || true\n    - docker rm reviewapp-demo-$CI_COMMIT_REF_NAME || true\n  when: manual\n  environment:\n    name: dev\n    action: stop\n```\n\n\nBoth the deploy_dev and the undeploy_dev job are bound by the tag \"dev\" to\nthe dev server which hosts our docker instances. That way the docker\ninstances will always start on the right server.\n\n\n## About the Author\n\n\n[Stephan Hochdörfer](https://twitter.com/shochdoerfer) currently holds the\nposition of Head of Technology at [bitExpert AG](https://www.bitexpert.de),\na company specializing in software and mobile development. 
His primary focus\nis everything related to web development as well as automation techniques\nranging from code generation to deployment automation.\n\n\n_This post was originally published on\n[blog.bitexpert.de](https://blog.bitexpert.de/blog/dockerizing-gitlab-review-apps/)._\n\n\n[Cover image](https://unsplash.com/@guibolduc?photo=uBe2mknURG4) by\n[Guillaume Bolduc](https://unsplash.com/@guibolduc) on Unsplash\n\n{: .note}\n",[763,9],{"slug":2119,"featured":6,"template":700},"dockerizing-review-apps","content:en-us:blog:dockerizing-review-apps.yml","Dockerizing Review Apps","en-us/blog/dockerizing-review-apps.yml","en-us/blog/dockerizing-review-apps",{"_path":2125,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2126,"content":2132,"config":2137,"_id":2139,"_type":14,"title":2140,"_source":16,"_file":2141,"_stem":2142,"_extension":19},"/en-us/blog/effective-ci-cd-pipelines",{"title":2127,"description":2128,"ogTitle":2127,"ogDescription":2128,"noIndex":6,"ogImage":2129,"ogUrl":2130,"ogSiteName":685,"ogType":686,"canonicalUrls":2130,"schema":2131},"Want a more effective CI/CD pipeline? Try our pro tips","Here’s how to take your CI/CD pipeline to the next level with hands on advice about faster builds, better security and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681447/Blog/Hero%20Images/cicdpipelines.jpg","https://about.gitlab.com/blog/effective-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want a more effective CI/CD pipeline? Try our pro tips\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":2127,"description":2128,"authors":2133,"heroImage":2129,"date":2134,"body":2135,"category":718,"tags":2136},[1037],"2020-07-29","\n\nNow that your [CI/CD pipeline](/topics/ci-cd/) is up and running, it’s time to fine-tune the performance. 
This hands on guide will walk you through tweaks that will improve a CI/CD pipeline’s speed, functionality, security, and integration with other tools and platforms.\n\n## Built for speed\n\nCI/CD and DevOps promises faster releases and we know it’s true: Even a basic automated pipeline is much speedier than the old days of manual handoffs. But there are ways to make the CI/CD pipeline even zippier. One straightforward option that guarantees faster builds is to [autoscale runners](/blog/making-builds-faster-autoscaling-runners/). If you have 15 minutes to spare, you can link your GitLab CI pipeline to the [Google Kubernetes engine](/blog/gitlab-ci-on-google-kubernetes-engine/). And it doesn’t get much faster than using the [Auto DevOps option](/blog/guide-to-ci-cd-pipelines/) if you’re setting up a new pipeline from scratch.\n\n## Do more with less\n\nOnce a pipeline is humming along, it’s time to think about tinkering with what you have. This is one of our favorite things to do at GitLab – we even used our CI/D pipeline to [turn our group conversation into a podcast](/blog/group-conversation-podcast/). We had an [unconventional CI/CD journey](/blog/gitlab-journey-to-cicd/), which goes a long way to explaining our overall enthusiasm for this technology.\n\nOur best advice when it comes to an effective CI/CD pipeline is to think outside the box. Need build images? It’s [easy to do](/blog/building-build-images/) with your CI/CD pipeline. You can also [create a cross-project pipeline](/blog/cross-project-pipeline/), or [build a bridge between Rust and Firebase](/blog/python-rust-and-gitlab-ci/).\n\n## Make it secure\n\nIt’s fun to play around with CI/CD functionality, but it’s critical to make sure your pipeline is secure. Start by making sure you [know the threat landscape](/blog/defend-cicd-security/). 
If you store key data in secrets management service [Vault](https://www.vaultproject.io), here’s how GitLab [makes the integration process easier and safer](/blog/vault-integration-process/).\n\nAnd for Jenkins users, it’s simple to [create deterministic security jobs](https://docs.gitlab.com/ee/integration/jenkins.html) from within GitLab.\n\n## Work with what you have\n\nNo effective CI/CD pipeline exists in a vacuum and to get the most out of yours it’s important to seamlessly integrate with other platforms and tools.\n\nAWS users can [set up multi-account SAM deployments](/blog/multi-account-aws-sam-deployments-with-gitlab-ci/) or [autoscale GitLab CI](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/) on Fargate.\n\nTeams working on Android projects can [can create a customized GitLab CI](/blog/setting-up-gitlab-ci-for-android-projects/) easily.\n\nAnd finally it’s possible to take advantage of Google’s Firebase, a backend-as-a-service tool, so you can enable [continuous deployment of database, serverless and apps](/blog/gitlab-ci-cd-with-firebase/).\n\n**Read more about CI/CD:**\n\n* [The four big benefits](/blog/positive-outcomes-ci-cd/) of CI/CD\n\n* [CI/CD challenges](/blog/modernize-your-ci-cd/) to consider\n\n* Everything you need to know about [Auto DevOps](/blog/auto-devops-explained/)\n\nCover image by [Jacek Dylag](https://unsplash.com/@dylu) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,721,232],{"slug":2138,"featured":6,"template":700},"effective-ci-cd-pipelines","content:en-us:blog:effective-ci-cd-pipelines.yml","Effective Ci Cd 
Pipelines","en-us/blog/effective-ci-cd-pipelines.yml","en-us/blog/effective-ci-cd-pipelines",{"_path":2144,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2145,"content":2151,"config":2156,"_id":2158,"_type":14,"title":2159,"_source":16,"_file":2160,"_stem":2161,"_extension":19},"/en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"title":2146,"description":2147,"ogTitle":2146,"ogDescription":2147,"noIndex":6,"ogImage":2148,"ogUrl":2149,"ogSiteName":685,"ogType":686,"canonicalUrls":2149,"schema":2150},"8 Steps to prepare your team for a DevOps platform migration","Getting teams ready enables them to migrate with more confidence and ease. Here's how to get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663786/Blog/Hero%20Images/craftsman-looks-at-continuous-integration.jpg","https://about.gitlab.com/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"8 Steps to prepare your team for a DevOps platform migration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-08-16\",\n      }",{"title":2146,"description":2147,"authors":2152,"heroImage":2148,"date":2153,"body":2154,"category":741,"tags":2155},[738],"2022-08-16","\nWhen organizations are getting ready to [move to a DevOps platform](https://page.gitlab.com/migrate-to-devops-guide.html), taking the time to get IT teams prepped for the migration will mean people can make the transition with more confidence and efficiency.\n\nBy [replacing a complicated mix of DevOps tools](/topics/devops/use-devops-platform-to-avoid-devops-tax/) with a single, end-to-end DevOps platform, you are about to change the way people work in a fundamental way. 
That will bring many benefits, like cutting tool-management costs, [increasing security](/blog/one-devops-platform-can-help-you-achieve-devsecops/), speeding software creation and deployment, and [replacing silos with a collaborative environment](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But any kind of change can create anxiety. By reaching out to people as part of your migration prep, managers can calm those stresses, create champions for the adoption, and ease the work that’s to come. \n\nLet’s look at what IT leaders can do to ease this transition for everyone.\n\n## Build buy-in\n\nStarting at the VP and CIO level, create organization-wide buy-in for this migration. This will be a wide-reaching project so everyone from the C-suite on down needs to be on board. Help them understand the importance of making this move. It’s not about adding a new tool – it’s about improving the way software development works overall, so make sure everyone is invested _from the beginning_. “Management and DevOps teams both need to understand that not migrating will ultimately take up more time and energy because they’d be forced to continue time-consuming glue work and duct taping to keep the toolchain stitched together,\" says [Brendan O’Leary](/company/team/#brendan), staff developer evangelist at GitLab. “People will be doing a lot less of that after a migration.”\n\n> Join us at [GitLab Commit 2022](/events/commit/) and connect with the ideas, technologies, and people that are driving DevOps and digital transformation.\n\n## Find champions\n\nEarly in the process, find your innovators and migration champions. Talk with people on every team to figure out who is excited about adopting a DevOps platform. These people will be critical. Empower them to lead the charge by allowing them to be the first to migrate with your full, visible support. 
Then their migration successes will serve as inspiration for those less excited to make the move.\n\n## Ease tension\n\nRemember that change makes people nervous and be sensitive to that. Get ahead of any anxieties by laying out how continuing on with their existing (and ever-expanding) [toolchains will only suck up more of their time and efforts](/blog/the-journey-to-a-devops-platform/) because they’ll have to remain focused on juggling a tangle of tools, instead of actually turning plans into software. Toolchains are not the fun part of their jobs, and they’ll be letting go of that.\n\n## Set expectations\n\nTalk with workers about what this will mean for them individually. Reassure them that this does not mean their jobs will be eliminated. However, it will change their day-to-day responsibilities since they’ll be doing less feeding and watering of disparate tools. That will give them more time to take on bigger, more valuable and more interesting projects. Developers, in particular, want to [work on projects that matter](/blog/why-software-developer-job-satisfaction-matters-and-how-to-make-it-happen/). Decreasing the toolchain red tape will be a huge step towards increased job satisfaction. \n\n## Define roles\n\nNot everyone on every team will work on the migration. Some will need to keep software development and deployment moving along, while others work on the adoption. Make it clear to individual team members what their roles will be. They’ll automatically be more at ease if it’s clear what their migration responsibilities will be.\n\n## Plan for training\n\nAssure everyone there will be training. They won’t just be thrown into the deep end of the pool. 
Make sure they know you will be setting them up for success.\n\n## Create sample projects\n\n[Fatima Sarah Khalid](/company/team/#sugaroverflow), a developer evangelist at GitLab, says that even before a migration even begins, managers should ensure their team members are ready to use a DevOps platform to do everything from planning to testing, and pushing software iterations through to production. “Managers should think about having a sample project set up with issues and epics. Set up workflows and merge requests. Run it all through,” says Khalid. “Getting hands-on experience before the migration will get rid of anyone’s fear that they’ll break something.”\n\n## Lay out the benefits\n\nMake sure everyone understands the benefits of using a DevOps platform:\n\n- Your business will be able to quickly, securely, and efficiently turn a vision into software.\n\n- Working in isolated silos will be replaced with working in tandem with teammates, [collaborating, and sharing information and responsibilities](/blog/if-its-time-to-learn-devops-heres-where-to-begin/).\n\n- A single application will give an overarching view of projects, enabling teams to check in on, comment on and offer suggestions on projects as they move through the development lifecycle.\n\n- Security and compliance will increase as it will be built into every step of the development and deployment lifecycle.\n\n- [Built-in automation](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) will reduce repetitive hands-on work with everything from testing to documentation.\n\nBy preparing teams to make the move to a DevOps platform, the entire migration process will be easier and more efficient. 
For more information on transitioning to an end-to-end platform, [check out this ebook](https://page.gitlab.com/migrate-to-devops-guide.html).\n",[721,9,873],{"slug":2157,"featured":6,"template":700},"eight-steps-to-prepare-your-team-for-a-devops-platform-migration","content:en-us:blog:eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","Eight Steps To Prepare Your Team For A Devops Platform Migration","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration.yml","en-us/blog/eight-steps-to-prepare-your-team-for-a-devops-platform-migration",{"_path":2163,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2164,"content":2170,"config":2175,"_id":2177,"_type":14,"title":2178,"_source":16,"_file":2179,"_stem":2180,"_extension":19},"/en-us/blog/eliminate-risk-with-feature-flags-tutorial",{"title":2165,"description":2166,"ogTitle":2165,"ogDescription":2166,"noIndex":6,"ogImage":2167,"ogUrl":2168,"ogSiteName":685,"ogType":686,"canonicalUrls":2168,"schema":2169},"How to use feature flags to lower risk in deployments","Follow this comprehensive tutorial to learn how to create and use feature flags in your software development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667743/Blog/Hero%20Images/flags.png","https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use feature flags to lower risk in deployments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2023-09-20\",\n      }",{"title":2165,"description":2166,"authors":2171,"heroImage":2167,"date":2172,"body":2173,"category":741,"tags":2174},[1506],"2023-09-20","Developers typically use advanced techniques like canary, blue/green, and\nincremental deployments to reduce risk when practicing progressive delivery,\na facet of continuous 
delivery (CD). In this tutorial, we will show you how\nto use feature flags, another progressive delivery option developers can use\nto test while in production.\n\n\n## What is progressive delivery?\n\nProgressive delivery is the ability to test in production while controlling\nyour audience of who can exercise or see updates to an application with a\nhigh level of granularity. This approach can also be thought of as developer\nexperimentation.\n\n\n## What are feature flags\n\nFeature flags enable you to choose what to deploy and who to deploy to in\nproduction. They allow you to define the audience for your application\nupdates as well as the fashion in which they will be served.\n\n\nFeature flags help stakeholders reduce risk, allowing them to do controlled\ntesting of features and separate feature delivery from customer launch.\n\n\n## Benefits of feature flags\n\nThe following are benefits of GitLab's feature flags.\n\n- **Lower risk.** Feature flags prevent unscheduled outages, control your\naudience in a fine-grained fashion, and can be optionally used in\nconjunction with canary deployments.\n\n- **Ease of use.** Feature flags have simple configurability and\ninstrumentation, support user lists, and offer built-in service.\n\n- **Language agnostic.** Our feature flag implementation supports all of the\nmain programming languages.\n\n- **Better compliance and audit capabilities.** The GitLab platform\nautomatically records all feature flags actions.\n\n\n## Tutorial requirements\n\nThis is what you need for this tutorial:\n\n1. A GitLab account on gitlab.com SaaS\n\n2. Flux CLI installed on your local desktop (on my Mac, I installed it by\nexecuting `brew install fluxcd/tap/flux`)\n\n3. A running Kubernetes cluster, i.e. a GKE cluster with 3 e2-medium nodes\n\n4. 
`kubectl` connectivity to your Kubernetes cluster from a local Terminal\nwindow on your desktop\n\n\n## About this feature flag tutorial\n\nThis tutorial is based on a fictitious application, which is a simplified\ninventory system. The goal of this tutorial is to show you how to create,\nconfigure, and implement a feature flag using GitLab.\n\n\n**Note:** This tutorial is for learning purposes and not meant to deploy a\nproduction-ready architecture. Also, to keep the number of steps low, masked\nvariables and sealed secrets are not being used throughout this tutorial.\n\n\n## Flux and the GitLab agent for Kubernetes\n\nHere is how to install Flux and GitLab agent for Kubernetes.\n\n- Log on to your GitLab workspace.\n\n- Create a personal access token (PAT) from your GitLab account by\nnavigating to **User settings > Preferences > Access tokens**. In the\n**Personal Access Tokens** section, click on the **Add new token** button on\nthe righthand side of the section. For **Token name**, enter `pat-for-flux`.\nLeave the expiration date with its default (it should be 30 days from its\ncreation) and select the **API** scope for your **PAT**. Click on the\n**Create personal access token** button to create your PAT. Copy and save\nthe value of your **PAT**; you will need it at a later step.\n\n\n![create-pat](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-pat.png){:\n.shadow.medium.center}\n\nCreating a personal access token\n\n{: .note.text-center}\n\n\n- Head back to your GitLab workspace main page.\n\n- Create a group named “hn” by clicking the button **New group** (or **New\nsubgroup** if you are creating this group inside an existing group) on the\ntop right hand side of your screen, and then clicking on the **Create\ngroup** tile. Enter \"hn\" for your **Group name** and click on the **Create\ngroup** button to create it. 
Leave the rest of the fields with their\ndefaults.\n\n\n![create-group-hn](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-group-hn.png){:\n.shadow.medium.center}\n\nCreating group \"hn\"\n\n{: .note.text-center}\n\n\n- Inside group “hn”, create project “flux-config” by clicking the **New\nproject** on the top righthand side of your screen and then clicking on the\n**Create blank project** tile.\n\n\n![create-proj-flux-config](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-proj-flux-config.png){:\n.shadow.medium.center}\n\nCreating project \"flux-config\"\n\n{: .note.text-center}\n\n\n- From the Terminal window with `kubectl` access to your Kubernetes cluster,\nexport your **PAT** by entering the following command:\n\n\n> export GITLAB_TOKEN=`\u003Creplace with your PAT value>`\n\n\n- From the Terminal window with `kubectl` access to your Kubernetes cluster,\nbootstrap Flux by executing the following command:\n\n\n**Note:** Make sure to replace `\u003Cyour path>` with whatever precedes your\ngroup “hn”. 
For example, it could be `--owner=tech-marketing/sandbox/hn`, or\nif your group “hn” is at the very top level of your GitLab workspace, it\nwould be `--owner=hn`.\n\n\n```\n\nflux bootstrap gitlab \\\n  --owner=\u003Cyour path>/hn \\\n  --repository=flux-config \\\n  --branch=main \\\n  --path=clusters/my-cluster \\\n  --deploy-token-auth\n```\n\n\n![flux-bootstrap-output](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/flux-bootstrap-output.png){:\n.shadow.medium.center.}\n\nFlux bootstrap output\n\n{: .note.text-center}\n\n\nThe “flux-config” project should now contain new directories and files as\nshown below:\n\n\n![flux-config-post-bootstrap](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/flux-config-post-bootstrap.png){:\n.shadow.medium.center}\n\nProject flux-config post flux bootstrap process\n\n{: .note.text-center}\n\n\n- Head over to project **hn/flux-config** and create file\n“.gitlab/agents/k8s-agent/config.yaml” by clicking on the **+** sign next to\nthe “flux-config” and selecting **New file**. Paste the following into it\nthe new file:\n\n\n**Note:** Make sure to replace `\u003Cyour path>` with whatever precedes your\ngroup “hn”. 
For example, it could be `- id: tech-marketing/sandbox/hn` or if\nyour group “hn” is at the very top level of your GitLab workspace, it would\nbe `- id: hn`.\n\n\n```\n\nci_access:\n  groups:\n    - id: \u003Cyour path>/hn\n```\n\n\nCommit this file to main by clicking on the **Commit changes** button and\nensuring that the target branch is “main”.\n\n\n![create-config-yaml](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-config-yaml.png){:\n.shadow.medium.center}\n\nCreating the GitLab agent for Kubernetes configuration manifest\n\n{: .note.text-center}\n\n\n- Head to **Operate > Kubernetes clusters** and register the agent by\nclicking the **Connect a cluster** button.\n\n\n![register-agent](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/register-agent.png){:\n.shadow.medium.center}\n\nRegistering the GitLab agent for Kubernetes\n\n{: .note.text-center}\n\n\n- On the “Connect a Kubernetes cluster” dialog, click on the popdown list\nand select agent “k8s-agent”. Click on the **Register** button. The dialog\nwill refresh and show the **Agent access token**. Copy and save the **Agent\naccess token**; you will need it at a later step. 
Close the dialog by\nclicking on the **Close** button.\n\n\n![agent-access-token-dialog](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-access-token-dialog.png){:\n.shadow.medium.center}\n\nThe agent access token to save\n\n{: .note.text-center}\n\n\nAt this moment, you will see the agent listed and its Connection status will\nbe “Never connected”.\n\n\n![agent-not-connected](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-not-connected.png){:\n.shadow.medium.center}\n\nAgent registered but not connected yet\n\n{: .note.text-center}\n\n\n-  Head to **flux-config/clusters/my-cluster** directory and create a file\nnamed “namespace-gitlab.yaml” and paste the following into it:\n\n\n```\n\napiVersion: v1\n\nkind: Namespace\n\nmetadata:\n  name: gitlab\n```\n\n\n![gitlab-namespace-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/gitlab-namespace-manifest.png){:\n.shadow.medium.center}\n\nManifest for the gitlab namespace\n\n{: .note.text-center}\n\n\nCommit this file to main by clicking on the **Commit changes** button and\nensuring that the target branch is “main”.\n\n\n```\n\nNote: You can check that the namespace was created in your cluster by\nexecuting this command from a Terminal:\n\n\nkubectl get ns\n\n```\n\n\n![gitlab-ns-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/gitlab-ns-created.png){:\n.shadow.medium.center}\n\nFlux created gitlab namespace\n\n{: .note.text-center}\n\n\n- Before we have Flux deploy the GitLab agent for Kubernetes to your\ncluster, we need to create a secret, containing the **Agent access token**\nyou saved earlier, in your cluster. 
Create a file named “secret.yaml” in\nyour local desktop, paste the following into it and then save it:\n\n\n**Note:** Make sure to replace `\u003Cyour-agent-access-token-here>` with your\n**Agent access token** you saved earlier.\n\n\n```\n\napiVersion: v1\n\nkind: Secret\n\nmetadata:\n  name: gitlab-agent-token-initial\ntype: Opaque\n\nstringData:\n  values.yaml: |-\n    config:\n      token: \"\u003Cyour-agent-access-token-here>\"\n```\n\n\n![agent-token-secret](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agent-token-secret.png){:\n.shadow.medium.center.}\n\nManifest for agent token secret created on local desktop\n\n{: .note.text-center}\n\n\n- Create the secret in your cluster by executing the following command from\na Terminal:\n\n\n> kubectl apply -f secret.yaml -n gitlab\n\n\n```\n\nNote: You can check that the secret was created in your cluster by executing\nthis command from a Terminal:\n\n\nkubectl get secrets -n gitlab\n\n```\n\n\n![apply-agent-token-secret](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/apply-agent-token-secret.png){:\n.shadow.medium.center}\n\nApplying the agent token secret to the Kubernetes cluster\n\n{: .note.text-center}\n\n\n- Now let’s use the Flux Helm Controller to deploy the GitLab agent for\nKubernetes to your cluster. 
Head to **flux-config/clusters/my-cluster**\ndirectory and create a file named “agentk.yaml” and paste the following into\nit:\n\n\n```\n\n---\n\napiVersion: source.toolkit.fluxcd.io/v1beta2\n\nkind: HelmRepository\n\nmetadata:\n  labels:\n    app.kubernetes.io/component: agentk\n    app.kubernetes.io/created-by: gitlab\n    app.kubernetes.io/name: agentk\n    app.kubernetes.io/part-of: gitlab\n  name: gitlab-agent\n  namespace: gitlab\nspec:\n  interval: 1h0m0s\n  url: https://charts.gitlab.io\n---\n\napiVersion: helm.toolkit.fluxcd.io/v2beta1\n\nkind: HelmRelease\n\nmetadata:\n  name: gitlab-agent\n  namespace: gitlab\nspec:\n  chart:\n    spec:\n      chart: gitlab-agent\n      sourceRef:\n        kind: HelmRepository\n        name: gitlab-agent\n        namespace: gitlab\n  interval: 1h0m0s\n  values:\n    replicas: 1\n    config:\n      kasAddress: \"wss://kas.gitlab.com\"  \n  valuesFrom:\n    - kind: Secret\n      name: gitlab-agent-token-initial\n      valuesKey: values.yaml\n```\n\n\n![create-agentk-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-agentk-manifest.png){:\n.shadow.medium.center}\n\nCreating the manifest for the GitLab agent for Kubernetes\n\n{: .note.text-center}\n\n\nCommit this file to main by clicking on the **Commit changes** button and\nensuring that the target branch is “main”.\n\n\n```\n\nNote: In a few seconds, you can check that the GitLab agent for Kubernetes\nwas created in your cluster by executing this command from a Terminal (the\npod name should start with “gitlab-agent”):\n\n\nkubectl get pods -n gitlab\n\n```\n\n![agentk-pod-up](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/agentk-pod-up.png){:\n.shadow.medium.center}\n\nAgentk running in the Kubernetes cluster\n\n{: .note.text-center}\n\n\n## Creating an instance of MySQL database in your cluster via Flux\n\n- Using the breadcrumb at the top of your window, head to group “hn” and\ncreate a new project by clicking on the 
**New project** button. On the\n**Create new project** window, click on the **Import project** tile.\n\n- At the **Import project** window, click on the **Repository by URL**\nbutton. The window will display fields to enter the URL of the repository\nyou would like to import. In the text field **Git repository URL**, enter\nthe following:\n\n\n>\n[https://gitlab.com/tech-marketing/sandbox/mysql.git](https://gitlab.com/tech-marketing/sandbox/mysql.git)\n\n\nLeave the rest of the fields with their defaults.\n\n\n![import-mysql-proj](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/import-mysql-proj.png){:\n.shadow.medium.center}\n\nImporting mysql project into group \"hn\"\n\n{: .note.text-center}\n\n\n- Click on the **Create project** button at the bottom of the screen. You\nwill see an \"Importing in progress\" message temporarily on your screen.\n\n- Now we need to create a deploy token for this project so that Flux can\ninteract with it. While in project “mysql”, select **Settings > Repository**\nand scroll down to the **Deploy tokens** section. Click on the **Expand**\nbutton to the right of the **Deploy tokens** section. Then click on the\n**Add token** button, which will expand the section to include fields to\nstart entering information for the deploy token to be created.\n\n- Give the deploy token the name “mysql-flux-deploy-token” and check the\ncheckbox **read_repository** for it. 
Then click on the button **Create\ndeploy token** to create the token.\n\n\n![create-mysql-deploy-token](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-deploy-token.png){:\n.shadow.medium.center}\n\nCreating the deploy token for \"mysql\" project for Flux to interact with it\n\n{: .note.text-center}\n\n\nCopy and save the username and password for the newly created deploy token;\nyou will need them at a later step.\n\n\n![mysql-deploy-token-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-deploy-token-created.png){:\n.shadow.medium.center}\n\nCreating the deploy token for \"mysql\" project for Flux to interact with it\n\n{: .note.text-center}\n\n\n-  From a Terminal, execute the following command to create a secret in your\ncluster for the deploy token you just created:\n\n\n**Note:** Make sure to replace `\u003Cyour path>` with the missing partial path\nto the project “mysql”, \u003Cyour-deploy-token-username> with the deploy token\nusername you saved earlier, and the \u003Cyour-deploy-token-password> with the\ndeploy token password you saved earlier.\n\n\n```\n\nflux create secret git mysql-flux-deploy-authentication \\\n         --url=https://gitlab.com/\u003Cyour path>/hn/mysql \\\n         --namespace=default \\\n         --username=\u003Cyour-deploy-token-username> \\\n         --password=\u003Cyour-deploy-token-password>\n```\n\n\n```\n\nNote: You can check that the secret was created in your cluster by executing\nthis command from a Terminal:\n\n\nkubectl -n default get secrets mysql-flux-deploy-authentication\n\n```\n\n\n![mysql-secret-created](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-secret-created.png){:\n.shadow.medium.center}\n\nCreating secret for the deploy token for \"mysql\" project in the Kubernetes\ncluster\n\n{: .note.text-center}\n\n\n- Head back to project “hn/flux-config” and open the Web IDE from 
it.\n\n\n![open-web-ide](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/open-web-ide.png){:\n.shadow.medium.center}\n\nSelecting Web IDE from the dropdown menu\n\n{: .note.text-center}\n\n\n- From inside the Web IDE, navigate to directory \"clusters/my-cluster\".\n\n\n![goto-clusters-mycluster](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/goto-clusters-mycluster.png){:\n.shadow.medium.center}\n\nNavigate to directory \"clusters/my-cluster\" in the Web IDE\n\n{: .note.text-center}\n\n\n- Inside “clusters/my-cluster” directory, create file\n“mysql-manifests-source.yaml” and paste the following text into it:\n\n\n**Note:** Replace `\u003Cyour path>` with the missing partial path to the project\n“mysql”\n\n\n```\n\napiVersion: source.toolkit.fluxcd.io/v1beta2\n\nkind: GitRepository\n\nmetadata:\n  name: mysql\n  namespace: default\nspec:\n  interval: 1m0s\n  ref:\n    branch: main\n  secretRef:\n    name: mysql-flux-deploy-authentication\n  url: https://gitlab.com/\u003Cyour path>/hn/mysql\n```\n\n\n![create-mysql-source-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-source-manifest.png){:\n.shadow.medium.center}\n\nCreating mysql-manifests-source.yaml file in the Web IDE\n\n{: .note.text-center}\n\n\n- Still in the Web IDE, inside “clusters/my-cluster” directory, create file\n“mysql-manifests-kustomization.yaml” and paste the following text into it:\n\n\n```\n\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\n\nkind: Kustomization\n\nmetadata:\n  name: mysql-source-kustomization\n  namespace: default\nspec:\n  interval: 1m0s\n  path: ./\n  prune: true\n  sourceRef:\n    kind: GitRepository\n    name: mysql\n    namespace: default\n  targetNamespace: default\n```\n\n\n![create-mysql-kustomization-manifest](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-mysql-kustomization-manifest.png){:\n.shadow.medium.center}\n\nCreating mysql-manifests-kustomization.yaml file 
in the Web IDE\n\n{: .note.text-center}\n\n\n- From the Web IDE, commit both files to the main branch by clicking on the\n**Source Control** icon on the left vertical menu, pressing the **Commit to\nmain** button.\n\n\n![commit-to-main](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/commit-to-main.png){:\n.shadow.medium.center}\n\nClicking on the Source Control icon and committing to main in the Web IDE\n\n{: .note.text-center}\n\n\nThen press the **Continue** button to confirm that you want to commit your\nchanges to the default branch:\n\n\n![commit-to-main-continue](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/commit-to-main-continue.png){:\n.shadow.medium.center}\n\nClicking on the Source Control icon and committing to main in the Web IDE\n\n{: .note.text-center}\n\n\n- Flux will deploy MySQL to your Kubernetes cluster. You can close the Web\nIDE browser tab at this point.\n\n\n```\n\nNote: You can check that the GitLab agent for Kubernetes was created in your\ncluster by executing this command from a Terminal:\n\n\nkubectl get pods -l app=mysql\n\n\nYou can check the persistent volume by executing this command from a\nTerminal:\n\n\nkubectl describe pvc mysql-pv-claim\n\n```\n\n\n![mysql-pod-and-pv-up](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-pod-and-pv-up.png){:\n.shadow.center}\n\nVerifying that mysql pod and its associated persitent volume claim are up\nand ready\n\n{: .note.text-center}\n\n\n- Now that the MySQL pod is up and running, we need to create a database,\ntables, and indexes in it and also populate some of the tables with dummy\ndata for the inventory system. 
Using the breadcrumb at the top of your\nwindow, head over to the “mysql” project and select **Build > Pipelines**\nfrom the left vertical navigation menu.\n\n\n![head-to-mysql-build-pipelines](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/head-to-mysql-build-pipelines.png){:\n.shadow.medium.center}\n\nHead to \"mysql\" project and select **Build > Pipelines** from the left\nvertical navigation menu\n\n{: .note.text-center}\n\n\n- Click on the **Run pipeline** button on the top right side of the\n**Pipelines** window. This will put you on the **Run pipeline** window.\nClick on the **Run pipeline** button on the bottom left of the **Run\npipeline** window leaving the rest of the fields with its defaults.\n\n\n![run-pipeline-button](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/run-pipeline-button.png){:\n.shadow.medium.center}\n\nClicking on the **Run pipeline** button to run the project \"mysql\" pipeline\n\n{: .note.text-center}\n\n\n- At this point you will see the pipeline stage and jobs. There are two jobs\nunder the **Build** stage: **create_and_load_db** and **clear_db**.\n\n\n![mysql-pipeline](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/mysql-pipeline.png){:\n.shadow.medium.center}\n\nThe \"mysql\" pipeline and its two manual jobs\n\n{: .note.text-center}\n\n\n- Click on the Play button (the right solid arrow) next to the\n**create_and_load_db** job name. This job will create a **product** table\nand a **users** table and populate them with dummy data. It will also create\ntables and indexes needed for storing all the session-related information as\nusers log in and log out from the inventory system.\n\n\n**Note:** The **clear_db** job should only be used if you’d like to erase\nall of the database resources created by the **create_and_load_db** job. 
The\n**clear_db** should only be used AFTER a failed run of the\n**create_and_load_db** job.\n\n\nNow that we have the database ready to go, let’s set up the project that we\nwill use for the creation of the feature flags.\n\n\n## Creating and importing projects\n\n- Head back to group “hn” and inside of it, create a cluster management\nproject (you can call it “cluster-management”) at the same level as the\nproject you imported above. You can view this [instructional\nvideo](https://www.youtube.com/watch?v=QRR3WuwnxXE&t=200s) (up to minute\n6:09) to see how to do this. While applying the steps in the video for this\ntutorial, adjust the variables values from the video to this post as\ndescribed in the following notes:\n\n\n**Note 1:** Make sure to create and set the KUBE_CONTEXT and KUBE_NAMESPACE\nvariable in group “hn” and to these values:\n\n\n| variable | value |\n\n| ---          | ---      |\n\n| KUBE_CONTEXT | `\u003Cyour path>`/hn/flux-config:k8s-agent |\n\n| KUBE_NAMESPACE | my-apps |\n\n\nFor example, in my case `\u003Cyour path>` was\n“tech-marketing/sandbox/hn/flux-config:k8s-agent”. In your case, it will be\ndifferent. If `\u003Cyour path>` is at the root of your GitLab workspace, then it\nwould be empty so the value of KUBE_CONTEXT would be\n“hn/flux-config:k8s-agent”.\n\n\n![add-var-KUBE_CONTEXT](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_CONTEXT.png){:\n.shadow.medium.center}\n\nAdding variable KUBE_CONTEXT in group \"hn\"\n\n{: .note.text-center}\n\n\n![add-var-KUBE_NAMESPACE](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_NAMESPACE.png){:\n.shadow.medium.center}\n\nAdding variable KUBE_NAMESPACE in group \"hn\"\n\n{: .note.text-center}\n\n\n**Note 2:** As an FYI, when uncommenting the GitLab managed apps in the\n“helmfile.yaml” file, there will not be one for Prometheus. 
So, you will\nonly uncomment the lines for ingress and cert-manager.\n\n\n![uncomment-ingress-and-cert-manager](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/uncomment-ingress-and-cert-manager.png){:\n.shadow.medium.center}\n\nUncommenting lines for ingress and cert-manager in file \"helmfile.yaml\"\n\n{: .note.text-center}\n\n\n**Note 3:** When the pipeline for project “cluster-management” runs, you\nwill notice that the job “sync” is a manual job. You will need to click on\nits **Play** (right arrow next to its name) button to run it. Wait until the\n“sync” job completes successfully before continuing.\n\n\n![click-play-on-sync-job](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/click-play-on-sync-job.png){:\n.shadow.medium.center}\n\nJob \"sync\" is manual so you need to press on the **Play** button next to its\nname\n\n{: .note.text-center}\n\n\n**Note 4:** Once the pipeline finishes, for your convenience, here is the\ncommand you need to run from a Terminal window to get the **external IP**\naddress of your cluster:\n\n\n```\n\nkubectl --namespace gitlab-managed-apps get services -o wide -w\ningress-ingress-nginx-controller\n\n```\n\n\n![getting-external-ip-address](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/getting-external-ip-address.png){:\n.shadow.medium.center}\n\nRunning `kubectl` command to get the ingress IP address to the cluster\n\n{: .note.text-center}\n\n\nCreate and set a variable `KUBE_INGRESS_BASE_DOMAIN` in group “hn” and set\nit to the **external IP** address of your cluster and append the suffix\n“.nip.io” to it.\n\n\n![add-var-KUBE_INGRESS_BASE_DOMAIN](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-KUBE_INGRESS_BASE_DOMAIN.png){:\n.shadow.medium.center}\n\nAddding variable KUBE_INGRESS_BASE_DOMAIN in group \"hn\"\n\n{: .note.text-center}\n\n\n- Inside group “hn”, create a new project. Click on the **New project**\nbutton. 
On the **Create new project** window, click on the **Import\nproject** tile and then click on the **Repository by URL** button.\n\n- This will expand the window and show fields to enter the URL of the\nrepository you would like to import. In the field **Git repository URL**,\nenter the following:\n\n\n>\n[https://gitlab.com/tech-marketing/sandbox/prodmgr.git](https://gitlab.com/tech-marketing/sandbox/prodmgr.git)\n\n\nLeave the rest of the fields with their defaults.\n\n\n![import-prodmgr-proj](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/import-prodmgr-proj.png){:\n.shadow.medium.center}\n\nImporting project \"prodmgr\" into group \"hn\"\n\n{: .note.text-center}\n\n\n- Click on the **Create project** button at the bottom of the screen. You\nwill see an **Importing in progress** message temporarily on your screen.\n\n- In project “prodmgr”, create a pipeline file and make sure to name it\n“.gitlab-ci.yml”. Paste the following code block into the empty file:\n\n\n```\n\ninclude:\n  template: Auto-DevOps.gitlab-ci.yml\n\nvariables:\n  K8S_SECRET_TF_VAR_dbusername: \"sasha\"\n  K8S_SECRET_TF_VAR_dbpassword: \"password\"\n  TEST_DISABLED: \"true\"\n  CODE_QUALITY_DISABLED: \"true\"\n  LICENSE_MANAGEMENT_DISABLED: \"true\"\n  BROWSER_PERFORMANCE_DISABLED: \"true\"\n  LOAD_PERFORMANCE_DISABLED: \"true\"\n  SAST_DISABLED: \"true\"\n  SECRET_DETECTION_DISABLED: \"true\"\n  DEPENDENCY_SCANNING_DISABLED: \"true\"\n  CONTAINER_SCANNING_DISABLED: \"true\"\n  DAST_DISABLED: \"true\"\n  REVIEW_DISABLED: \"true\"\n  CODE_INTELLIGENCE_DISABLED: \"true\"\n  CLUSTER_IMAGE_SCANNING_DISABLED: \"true\"\n  POSTGRES_ENABLED: \"false\"\n  STAGING_ENABLED: \"true\"\n  INCREMENTAL_ROLLOUT_MODE: \"manual\"\n```\n\n\nClick on the **Commit changes** button ensuring that the **Target branch**\nis main.\n\n\n![prodmgr-proj-pipeline](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/prodmgr-proj-pipeline.png){:\n.shadow.medium.center}\n\nCreating an 
Auto-DevOps-based pipeline for project \"prodmgr\"\n\n{: .note.text-center}\n\n\n- The previous step builds the application and deploys it to the staging\nenvironment. Once deployed to staging, head to **Build > Pipelines** and\nclick on the most recently executed pipeline (should be the first one in the\nlist). Click on the pipeline to display it and then deploy the application\nto production by clicking on “rollout 100%” job.\n\n\n![rollout-to-prod](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/rollout-to-prod.png){:\n.shadow.medium.center}\n\nTo deploy the application to production, click on the **rollout 100%** Play\nbutton\n\n{: .note.text-center}\n\n\nAt this point, you have a running application in the staging and production\nenvironments in your Kubernetes cluster. Let’s start creating a feature\nflag.\n\n\n## Creating a new feature flag\n\n-  In project “prodmgr”, select **Deploy > Feature flags** from your left\nvertical navigation menu.\n\n\n### Creating a user list\n\n- Click on the link **View user lists** on the top right hand side of your\nscreen.\n\n- Click on the **New user list** button on the top right hand side of your\nscreen.\n\n- In the **Name** field of the user list, enter\n“prods-in-alphabetical-order-userlist” and then click on the **Create**\nbutton.\n\n\n![create-ff-userlist](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/create-ff-userlist.png){:\n.shadow.medium.center.}\n\nCreating user list named \"prods-in-alphabetical-order-userlist”\n\n{: .note.text-center}\n\n\n- On the next screen, click on the **Add Users** button on the top right\nhand side of your screen.\n\n- In the **User IDs** text field, enter the following two email addresses\nand then click on the **Add** button:\n\n\n> michael@cfl.rr.com,mary@cfl.rr.com\n\n\n![add-users-to-list](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-users-to-list.png){:\n.shadow.medium.center}\n\nAdding users to user list 
\"prods-in-alphabetical-order-userlist”\n\n{: .note.text-center}\n\n\n- Head back to the Feature flags window by selecting **Deploy > Feature\nflags** from your left vertical navigation menu.\n\n\n### Creating the flag\n\n- Click on the **New feature flag** button on the top right hand side of\nyour screen.\n\n- In the **New feature flag** window, enter\n“prods-in-alphabetical-order-ff”.\n\n\n### Specifying the strategy for the production environment\n\nIn the **Strategies** section of the **New feature flag** window, there\nshould already be sub-sections for **Type** and **Environments**.\n\n- For **Type**, select **Percent rollout** from the dropdown menu.\n\n- For **Percentage**, enter **50** in the field.\n\n- For **Based on**, ensure that **Available ID** is selected from the\npopdown menu.\n\n- For **Environments**, click on the **+** sign and select the\n**production** environment.\n\n\n### Specifying the strategy for the staging environment\n\n- Click on the **Add strategy** button on the right hand side of the\n**Strategies** section. 
A new sub-section for another strategy will appear.\n\n- For **Type**, select **User List** from the dropdown menu.\n\n- For **User List**, select the user list\n**prods-in-alphabetical-order-userlist**.\n\n- For **Environments**, click on the **+** sign and select the **staging**\nenvironment.\n\n- Click on **Create feature flag** button at the bottom of your screen to\ncomplete the creation of the feature flag.\n\n\n![ff-and-strats-def](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/ff-and-strats-def.png){:\n.shadow.medium.center}\n\nDefining the feature flag with its strategies for strating and production\nenvironments\n\n{: .note.text-center}\n\n\n## Sharing feature flag configuration information with developers\n\nIn order for developers to instrument their code for this feature flag, you\nneed to share with them the following information:\n\n- On the **Feature flags** window, click on the **Configure** button on the\ntop right hand side of your screen.\n\n- Copy and save the values of **API URL** (URL where the client application\nconnects to get a list of feature flags) and **Instance ID** (unique token\nthat authorizes the retrieval of the feature flags). These are the two\nvalues that you will need for feature flag instrumentation.\n\n\n![ff-api-url-and-instance-id](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/ff-api-url-and-instance-id.png){:\n.shadow.medium.center}\n\nCopy and save the values for the feature flag API URL and Instance ID\n\n{: .note.text-center}\n\n\n- Head over to **Settings > CI/CD** and scroll down to the **Variables**\nsection and click on its **Expand** button. 
Add the following two variables\nto your project:\n\n\n| Variable Key | Variable Value | Variable Type | Environment Scope | Flag -\nProtect variable | Flag - Mask variable\n\n| ----------- | ----------- | ----------- |----------- | ----------- |\n----------- |\n\n| K8S_SECRET_UNLEASH_URL | \\\u003Csaved **API URL** value\\> | Variable | All\n(default) | unchecked | unchecked\n\n| K8S_SECRET_UNLEASH_INSTANCE_ID | \\\u003Csaved **Instance ID** value\\> |\nVariable | All (default) | unchecked | unchecked\n\n\n![add-var-K8S_SECRET_UNLEASH_URL](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-K8S_SECRET_UNLEASH_URL.png){:\n.shadow.medium.center}\n\nAdding variable K8S_SECRET_UNLEASH_URL to project \"prodmgr\"\n\n{: .note.text-center}\n\n\n![add-var-K8S_SECRET_UNLEASH_INSTANCE_ID](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/add-var-K8S_SECRET_UNLEASH_INSTANCE_ID.png){:\n.shadow.medium.center}\n\nAdding variable K8S_SECRET_UNLEASH_INSTANCE_ID to project \"prodmgr\"\n\n{: .note.text-center}\n\n\nThese two variables contain values that will be passed to your application\n(via the K8S_SECRET_ keyword) so that it can make use of the feature flags\ndefined and managed by GitLab.\n\n\nIn order for your application to be able to use feature flags, you need to\ninstrument your application with our Feature Flags framework. Let's see how\nyou do this in the sample Java application.\n\n\n## Instrumenting the code\n\nIn this example, we are using the Java client for Unleash but if you’re\nusing a different programming language then you need to use the client\nlibrary for your language. 
To get all the supported languages, refer to the\n[Unleash documentation](https://docs.getunleash.io/reference/sdks) or\n[Unleash open source\nproject](https://github.com/Unleash/unleash#unleash-sdks).\n\n\n### Instrumenting Java class files\n\n- In project “prodmgr”, navigate to the directory\n`src/main/java/csaa/jspring/ProductManager`.\n\n- Click on the file name “AppController.java” to view its contents and then\nclick on the Edit button to enter edit mode.\n\n- You will see a few code blocks that have been commented out and are\npreceded by the line:\n\n\n> // Uncomment block below to instrument Feature Flag\n\n\nUncomment all the code blocks under each of the lines indicated above.\n\n\n![java-file-with-uncommented-lines](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/java-file-with-uncommented-lines.png){:\n.shadow.medium.center}\n\nPartial view of AppController.java file with uncommented code blocks\n\n{: .note.text-center}\n\n\n- Commit the changes to the main branch.\n\n- The commit starts a pipeline that deploys the application to the staging\nenvironment. Head to **Build > Pipelines** and click on the most recently\nexecuted pipeline (should be the first one in the list). Click on the\npipeline to display it and wait until the **staging** job finishes. Then\ndeploy the application to production by clicking on “rollout 100%” job.\n\n\nNow that the application is running in the staging and production\nenvironments, let’s see the feature flag in action.\n\n\n## Feature flag in action\n\nNow let's check how the feature flag is working.\n\n### Checking the feature flag in the staging environment\n\n- In project “prodmgr”, click on **Operate > Environments** to see the list\nof all environments. Then click on the \"Open live environment\" button for\nthe staging environment.\n\n- A new browser tab will appear and will display a login screen. 
If your\nbrowser complains about the connection being insecure, accept the risk and\nopen the browser tab.\n\n- Remember that the feature flag strategy for staging is based on the user\nlist containing michael and mary in it. Let’s try logging in as each of\nthem.\n\n- Enter credentials michael@cfl.rr.com with password p33sw0rd. Verify that\nMichael gets a product list sorted in alphabetical order. Log out and close\nthe browser tab to ensure that his session closes.\n\n\n![michael-gets-ff](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/michael-gets-ff.png){:\n.shadow.medium.center}\n\nMichael gets the feature flag that orders the list of product names in\nalphabetical order\n\n{: .note.text-center}\n\n\n- From the Environments window, click on the \"Open live environment\" button\nfor the staging environment. Enter credentials \"mary@cfl.rr.com\" with\npassword \"p33sw0rd\". Verify that mary gets a product list sorted in\nalphabetical order. Log out and close the browser tab to ensure that her\nsession closes.\n\n- From the Environments window, click on the \"Open live environment\" button\nfor the staging environment. This time, enter credentials for\n\"thomas@gmail.com\" with password \"p33sw0rd\". Verify that thomas does **not**\nget a product list sorted in alphabetical order. 
Log out and close the\nbrowser tab to ensure that his session closes.\n\n\n![thomas-does-not-get-the-ff](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/thomas-does-not-get-the-ff.png){:\n.shadow.medium.center}\n\nThomas does not get the feature flag because the product names are not\nordered in alphabetical order\n\n{: .note.text-center}\n\n\nThe steps above demonstrate that the feature flag strategy for staging\nsuccessfully worked.\n\n\n### Checking the feature flag in the production environment\n\n- Click on **Operate > Environments** to see the list of all environments.\nThen click on the \"Open live environment\" button for the production\nenvironment.\n\n- A new browser tab will appear and will display a login screen. If your\nbrowser complains about the connection being insecure, accept the risk and\nopen the browser tab.\n\n- Remember that the strategy in production is that the feature will be\nserved to 50% of the users. Try logging into the web application as each of\nthe following users keeping track of who gets the list of products sorted in\nalphabetical order by name and who does not:\n\n\n**Note:** Remember to click on the \"Open live environment\" button for the\n**production** environment. 
Once you log out from each user, remember to\n**close** the browser tab to ensure that the session closes.\n\n\n| Username | Password\n\n| ----------- | ----------- |\n\n| peter@gmail.com | pa33w0rd\n\n| magic@cfl.rr.com | pa33w0rd\n\n| michael@cfl.rr.com | pa33w0rd\n\n| henry@gmail.com | pa33w0rd\n\n| mary@cfl.rr.com | pa33w0rd\n\n| thomas@gmail.com | pa33w0rd\n\n\nYour final count should consist of three users being served the feature and\nthree not, matching the strategy that was set for the production\nenvironment.\n\n\nAs changes are made to feature flags, you can track them from the audit\nevents window.\n\n\n## Auditing feature flag changes\n\n**Note:** A Premium GitLab subscription is needed for viewing Audit events.\n\n\n- In project “prodmgr”, select **Secure > Audit events** from the left\nvertical navigation menu.\n\n- This displays all the events that have occurred in GitLab for the last\nthirty days. You will see that events related to updates to feature flags\nare listed.\n\n\n![audit-events-list](https://about.gitlab.com/images/blogimages/feature-flags-tutorial/audit-events-list.png){:\n.shadow.medium.center}\n\nAudit events is an auditable list of actions that have been taken againt\nresources\n\n{: .note.text-center}\n\n\nThis auditing allows you to identify when and who made changes to feature\nflags. 
It can also help preempt out-of-compliance scenarios and streamline\naudits to avoid penalties, providing an opportunity to optimize cost, and\nlower risk of unscheduled production outages.\n\n\nNow you know how to create and use feature flags to lower your deployment\nrisk.\n\n\nPhoto by \u003Ca\nhref=\"https://unsplash.com/@liamdesic?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Liam\nDesic\u003C/a> on \u003Ca\nhref=\"https://unsplash.com/photos/acKSt3THWKA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n",[9,695,696],{"slug":2176,"featured":6,"template":700},"eliminate-risk-with-feature-flags-tutorial","content:en-us:blog:eliminate-risk-with-feature-flags-tutorial.yml","Eliminate Risk With Feature Flags Tutorial","en-us/blog/eliminate-risk-with-feature-flags-tutorial.yml","en-us/blog/eliminate-risk-with-feature-flags-tutorial",{"_path":2182,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2183,"content":2189,"config":2196,"_id":2198,"_type":14,"title":2199,"_source":16,"_file":2200,"_stem":2201,"_extension":19},"/en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"title":2184,"description":2185,"ogTitle":2184,"ogDescription":2185,"noIndex":6,"ogImage":2186,"ogUrl":2187,"ogSiteName":685,"ogType":686,"canonicalUrls":2187,"schema":2188},"GPU-enabled runners for ModelOps and HPC workloads in CI/CD","Learn how to leverage our GitLab-hosted GPU-enabled runners for ModelOps and high-performance computing workloads.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682702/Blog/Hero%20Images/gitlab-data-science-icon.png","https://about.gitlab.com/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":2190,"description":2185,"authors":2191,"heroImage":2186,"date":2193,"body":2194,"category":849,"tags":2195},"Empower ModelOps and HPC workloads with GPU-enabled runners integrated with CI/CD",[2192],"Gabriel Engel","2023-07-06","\u003Ci>This blog post is the latest in an ongoing series about GitLab's journey\nto [build and integrate AI/ML into our DevSecOps\nplatform](/blog/ai-ml-in-devsecops-series/). Start with the first\nblog post: [What the ML is up with DevSecOps and\nAI?](/blog/what-the-ml-ai/). Throughout the series, we'll feature\nblogs from our product, engineering, and UX teams to showcase how we're\ninfusing AI/ML into GitLab.\u003C/i>\n\n\nIn today's fast-paced world, organizations are constantly looking to improve\ntheir [ModelOps](/direction/modelops/) and high-performance computing (HPC)\ncapabilities. Leveraging powerful graphical processing units\n([GPUs](https://www.techtarget.com/searchvirtualdesktop/definition/GPU-graphics-processing-unit))\nhas become a game-changer for accelerating machine learning workflows and\ncompute-intensive tasks. To help meet these evolving needs, we recently\nreleased our first GPU-enabled runners on GitLab.com.\n\n\nSecurely hosting a GitLab Runner environment for ModelOps and HPC is\nnon-trivial and requires a lot of knowledge and time to set up and maintain.\nIn this blog post, we'll look at some real-world examples of how you can\nharness the potential of GPU computing for ModelOps or HPC workloads while\ntaking full advantage of a SaaS solution.\n\n\n## What are GPU-enabled runners?\n\nGPU-enabled runners are dedicated computing resources for the AI-powered\nDevSecOps platform. 
They provide accelerated processing power for ModelOps\nand HPC such as the training or deployment of large language models\n([LLMs](https://www.techtarget.com/whatis/definition/large-language-model-LLM))\nas part of ModelOps workloads. In the first iteration of releasing\nGPU-enabled runners, [GitLab.com SaaS\noffers](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html) the\nGCP `n1-standard-4` machine type (4 vCPU, 15 GB memory) with 1 NVIDIA T4 (16\nGB memory) attached. The runner behaves like a GitLab Runner on Linux, using\nthe docker+machine [executor](https://docs.gitlab.com/runner/executors/). \n\n\n## Using GPU-enabled runners\n\nTo take advantage of GitLab GPU-enabled runners, follow these steps:\n\n\n### 1. Have a project on GitLab.com\n\nAll projects on GitLab.com SaaS with a `Premium` or `Ultimate`\n[subscription](https://about.gitlab.com/pricing/) have the GPU-enabled\nrunners enabled by default - no additional configuration is required.\n\n\n### 2. Create a job running on GPU-enabled runners\n\nCreate a job in your `.gitlab-ci.yml` configuration file, and set the\n[runner\n`tag`](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#use-tags-to-control-which-jobs-a-runner-can-run)\nto the `saas-linux-medium-amd64-gpu-standard` value. \n\n\n```yaml\n\ngpu-job:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n```\n\n\n### 3. Select a Docker image with the Nvidia CUDA driver\n\n\nThe CI/CD job runs in an isolated virtual machine (VM) with a\nbring-your-own-image policy as with [GitLab SaaS runners on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html).\nGitLab mounts the GPU from the host VM into your isolated environment. You\nmust use a Docker image with the GPU driver installed to use the GPU. 
For\nNvidia GPUs, you can use the [CUDA\nToolkit](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda)\ndirectly, or third-party images with Nvidia drivers installed, such as the\n[TensorFlow GPU image](https://hub.docker.com/r/tensorflow/tensorflow/).\n\n\nThe CI/CD job configuration for the Nvidia CUDA base Ubuntu image looks like\nthis:\n\n\n```yaml\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n```\n\n\n### 4. Verify that the GPU is working\n\nTo verify that the GPU drivers are working correctly, you can execute the\n`nvidia-smi` command in the CI/CD job `script` section. \n\n\n```yaml\n  script:\n    - nvidia-smi\n```\n\n\n## Basic usage examples\n\nLet's explore some basic scenarios where GPU-enabled runners can supercharge\nyour ModelOps and HPC workloads:\n\n\n### Example 1: ModelOps with Python\n\nIn this example, we train a model on our GPU-enabled runner defined in the\n`train.py` file using the Nvidia CUDA base Ubuntu image mentioned earlier.\n\n\n`.gitlab-ci.yml` file:\n\n```yaml\n\nmodel-training:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - apt update\n    - apt install -y --no-install-recommends python3 python3-pip \n    - pip3 install -r requirements.txt\n    - python3 --version\n    - python3 train.py\n```\n\n\n### Example 2: Scientific simulations and HPC\n\nComplex scientific simulations require significant computing resources.\nGPU-enabled runners can accelerate these simulations, allowing you to get\nresults in less time.\n\n\n`.gitlab-ci.yml` file:\n\n```yaml\n\nsimulation-run:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n  script:\n    - ./run_simulation --input input_file.txt\n```\n\n\n## Advanced usage examples\n\nLet's go through some real-world scenarios of how we use GPU-enabled runners\nat GitLab.\n\n\n### Example 3: Python model training with a 
custom Docker image\n\nFor our third example, we will use this [handwritten digit recognition\nmodel](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition).\nWe are using this project as a demo to showcase or try out new ModelOps\nfeatures.\n\n\n[Open the\nproject](https://gitlab.com/gitlab-org/modelops/demos/handwritten-digit-recognition)\nand fork it into your preferred namespace. You can follow the next steps\nusing the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) in the\nbrowser, or clone the project locally to create and edit the files. Some of\nthe next steps require you to override existing configuration in the\n`Dockerfile` and `.gitlab-ci.yml`. \n\n\nAs we need more pre-installed components and want to save installation time\nwhen training the model, we decided to create a custom Docker image with all\ndependencies pre-installed. This also gives us full control over the build\nenvironment we use and allows us to reuse it locally without relying on the\n`.gitlab-ci.yml' implementation.\n\n\nIn addition, we are using a more complete pipeline configuration with the\nfollowing stages:\n\n\n```yaml\n\nstages:\n  - build\n  - test\n  - train\n  - publish\n```\n\n\n![GPU pipeline\noverview](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/pipeline-overview.png)\n\n\n#### Building a custom Docker image\n\nThe first step is to define a `Dockerfile`. In this example, we start with\nthe Nvidia CUDA base Ubuntu image and then install `Python3.10`. Using `pip\ninstall`, we then add all the required libraries specified in a\n`requirements.txt` file.\n\n\n```docker\n\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\n\n\n1. Update and install required packages\n\nRUN apt-get update && apt-get install -y \\\n    python3.10 \\\n    python3.10-dev \\\n    python3-pip \\\n    && rm -rf /var/lib/apt/lists/*\n\n2. 
Set Python 3.10 as the default Python version\n\nRUN ln -s /usr/bin/python3.10 /usr/bin/python\n\n\n3. Copy the requirements.txt file\n\nCOPY requirements.txt /tmp/requirements.txt\n\n\n4. Install Python dependencies\n\nRUN pip3 install --no-cache-dir -r /tmp/requirements.txt\n\n```\n\n\nIn the `.gitlab-ci.yml` file we use\n[Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) to build\nthe Docker image and push it into the [GitLab Container\nRegistry](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n\n```yaml\n\nvariables:\n  IMAGE_PATH: \"${CI_REGISTRY_IMAGE}:latest\"\n  GIT_STRATEGY: fetch\n\ndocker-build:\n  stage: build\n  tags:\n    - saas-linux-medium-amd64\n  image:\n    name: gcr.io/kaniko-project/executor:v1.9.0-debug\n    entrypoint: [\"\"]\n  script:\n    - /kaniko/executor\n      --context \"${CI_PROJECT_DIR}\"\n      --dockerfile \"${CI_PROJECT_DIR}/Dockerfile\"\n      --destination \"${IMAGE_PATH}\"\n      --destination \"${CI_REGISTRY_IMAGE}:${CI_COMMIT_TAG}\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\nIn [rules](https://docs.gitlab.com/ee/ci/yaml/#rules) we define to only\ntrigger the Docker image build for a new git tag. The reason is simple - we\ndon't want to run the image build process for every time we train the model.\n\n\nTo start the image build job [create a new Git\ntag](https://docs.gitlab.com/ee/user/project/repository/tags/#create-a-tag).\nYou can either do this by using `git tag -a v0.0.1` command or via UI.\nNavigate into `Code > Tags` and click on `New Tag`. 
As Tag name type\n`v0.0.1` to create a new Git tag and trigger the job.\n\n\nNavigate to `Build > Pipelines` to verify the `docker-build` job status, and\nthen locate the tagged image following [`Deploy > Container\nRegistry`](https://docs.gitlab.com/ee/user/packages/container_registry/).\n\n\n![Docker\nimage](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/gpu-docker-image.png)\n\n\n#### Testing the Docker image\n\nTo test the image, we will use the following `test-image` job and run\n`nvidia-smi` and check that the GPU drivers are working correctly.\n\n\nThe job configuration in `.gitlab-ci.yml` file looks as follows:\n\n\n```yaml\n\ntest-image:\n  stage: test\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - nvidia-smi\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\nWe also include container scanning and more [security\nscanning](https://docs.gitlab.com/ee/user/application_security/) templates\nin the `.gitlab-ci.yml` file.\n\n\n```yaml\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n  - template: Security/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n\n#### Training the model with our custom Docker image\n\nNow that we have built our Custom docker image, we can train the model\nwithout installing any more dependencies in the job.\n\n\nThe train job in our `.gitlab-ci.yml` looks like this:\n\n\n```yaml\n\ntrain:\n  stage: train\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: $IMAGE_PATH\n  script:\n    - python train_digit_recognizer.py\n  artifacts:\n    paths:\n      - mnist.h5\n    expose_as: 'trained model'\n```\n\n\nNavigate to `Build > Pipelines` to see the job logs.\n\n\n![Train job\nlogs](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/train-job-log.png)\n\n\nFrom here, you can also inspect the `train` job 
artifacts.\n\n\n#### Publishing the model\n\nIn the last step of our `.gitlab-ci.yml` file, we are going to publish the\ntrained model.\n\n\n```yaml\n\npublish:\n  stage: publish\n  when: manual\n  dependencies:\n    - train\n  image: curlimages/curl:latest\n  script:\n    - 'curl --header \"JOB-TOKEN: $CI_JOB_TOKEN\" --upload-file mnist.h5 \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/MNIST-Model/${CI_COMMIT_TAG}/mnist.h5\"'\n```\n\n\nNavigate to `Build > Pipelines` and trigger the `publish` job manually.\nAfter that, navigate into `Deploy > Package Registry` to verify the uploaded\ntrained model.\n\n\n![Package\nRegistry](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/package-registry.png)\n\n\n### Example 4: Jupyter notebook model training for ML-powered GitLab Issue\ntriage\n\n\nIn the last example, we are using our GPU-enabled runner to train the\ninternal [GitLab model to triage\nissues](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/tree/using-gpu-enabled-runner).\nWe use this model at GitLab to determine and assign issues to the right team\nfrom the context of the issue description.\n\n\nDifferent from the previous examples, we now use the [`tensorflow-gpu`\ncontainer image](https://hub.docker.com/r/tensorflow/tensorflow) and install\nthe\n[requirements](https://gitlab.com/gitlab-org/ml-ops/tanuki-stan/-/blob/using-gpu-enabled-runner/notebooks/requirements.tensorflow-gpu.txt)\nin the job itself.\n\n\n`.gitlab-ci.yml` configuration:\n\n\n```yaml\n\ntrain:\n  tags:\n    - saas-linux-medium-amd64-gpu-standard\n  image: tensorflow/tensorflow:2.4.1-gpu\n  script:\n    - nvidia-smi\n    - cd notebooks\n    - pip install -r requirements.tensorflow-gpu.txt\n    - jupyter nbconvert --to script classify_groups.ipynb\n    - apt-get install -y p7zip-full\n    - cd ../data\n    - 7z x -p${DATA_PASSWORD} gitlab-issues.7z\n    - cd ../notebooks\n    - python3 classify_groups.py\n  artifacts:\n    paths:\n      - 
models/\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" || $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n      when: manual\n      allow_failure: true\n```\n\n\n![TensorFlow\ntrain](https://about.gitlab.com/images/blogimages/2023-07-06-gpu-enabled-runners-for-modelops/tensorflow-train.png)\n\n\nIf you are interested in another Jupyter notebook example, check out our\nrecently published video on [Training ML Models using GPU-enabled\nrunner](https://youtu.be/tElegG4NCZ0).\n\n\n\u003Ciframe width=\"768\" height=\"432\"\nsrc=\"https://www.youtube.com/embed/tElegG4NCZ0\" title=\"YouTube video player\"\nframeborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write;\nencrypted-media; gyroscope; picture-in-picture; web-share\"\nallowfullscreen>\u003C/iframe>\n\n\n## Results\n\nThe integration of GPU-enabled runners on GitLab.com SaaS opens up a new\nrealm of possibilities for ModelOps and HPC workloads.\n\nBy harnessing the power of GPU-enabled runners, you can accelerate your\nmachine learning workflows, enable faster data processing, and improve\nscientific simulations, all while taking full advantage of a SaaS solution\nand avoiding the hurdles of hosting and maintaining your own build hardware.\n\n\nWhen you try the GPU-enabled runners, please share your experience in our\n[feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/403008).\n\n\nCompute-heavy workloads can take a long time. A known problem is timeouts\nafter three hours because of the current [configuration of GitLab SaaS\nrunners](https://docs.gitlab.com/ee/ci/runners/#how-saas-runners-work).\n\nWe plan to release more powerful compute for future iterations to handle\nheavier workloads faster. 
You can follow updates about GPU-enabled runners\nin the [GPU-enabled runners\nepic](https://gitlab.com/groups/gitlab-org/-/epics/8648) and learn more in\nthe [GPU-enabled runners\ndocumentation](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html).\n",[696,851,9,693,695],{"slug":2197,"featured":6,"template":700},"empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners","content:en-us:blog:empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","Empowering Modelops And Hpc Workloads With Gpu Enabled Runners","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners.yml","en-us/blog/empowering-modelops-and-hpc-workloads-with-gpu-enabled-runners",{"_path":2203,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2204,"content":2210,"config":2215,"_id":2217,"_type":14,"title":2218,"_source":16,"_file":2219,"_stem":2220,"_extension":19},"/en-us/blog/enables-rapid-innovation",{"title":2205,"description":2206,"ogTitle":2205,"ogDescription":2206,"noIndex":6,"ogImage":2207,"ogUrl":2208,"ogSiteName":685,"ogType":686,"canonicalUrls":2208,"schema":2209},"GitLab uniquely enables rapid innovation","Learn about some of the ways GitLab can uniquely enable your developers to innovate more rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681603/Blog/Hero%20Images/rapids-cover-1275x750.jpg","https://about.gitlab.com/blog/enables-rapid-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab uniquely enables rapid innovation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":2205,"description":2206,"authors":2211,"heroImage":2207,"date":2212,"body":2213,"category":978,"tags":2214},[1506],"2020-09-30","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nA challenge that organizations often face is the amount of time spent 
maintaining their IT systems vs. the time spent innovating and developing differentiating features for customers.  This challenge has become even more difficult during a global pandemic where working from home makes it harder to engage with your customers in person and digital channels have become the primary vehicle to do business with consumers of your services and products. Rapid innovation means your organization and teams can deliver lovable features faster and get value into the hands of customers sooner. This is more urgent than ever before to remain competitive and ultimately survive in this new business reality, and requires your developers to spend more time creating and developing code rather than managing multiple disparate tools, environments, and processes.\n\nGitLab uniquely enables rapid innovation by simplifying the adoption of DevOps practices so that your developers can spend more time creating innovative features and applications that matter to your customers. \n\nWatch this video (~6 mins) to see these rapid innovation capabilities in action.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/MLrqJ1sxkjQ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nThe following is a non-exhaustive list of ways that GitLab helps your teams to achieve rapid innovation.\n\n### Easy collaboration across multiple roles and asset types\n\nApplication creators and stakeholders within every organization come from many disciplines, often times each using their own file types to get work done. 
For example:\n* **Product Designers** typically work with the output of their design tools, which could be Figma or Sketch files, images, or graphs.\n* **Developers** mainly work with programming language source files (code).\n* **DevOps Engineers** might use Infrastructure-as-code files, like Terraform, CloudFormation, or Azure Resource Manager files\n* **Database Administrators** often use Data Definition Language (DDL), Data Manipulation Language (DML), and SQL scripts.\n\nWhereas other CI/CD solutions typically stick to one type of asset, with GitLab, stakeholders can easily collaborate and contribute using their preferred asset types as part of a single conversation across the whole software development lifecycle. Not only does this enrich the conversation between all stakeholders, but it speeds up the innovation process by lowering the barrier for cross team collaboration.\n\n![issue with design picture](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/issue-with-design-picture.png){: .shadow.medium.center.wrap-text}\n\n### Security and compliance\n\nSecurity and compliance is usually a top priority for CIOs and directly affects how code is developed throughout the end-to-end SDLC. It's critical to protect your IP and equally important for customers to take confidence in the fact that their sensitive data is safe and secure. Instead of putting together your own mechanisms to check security vulnerabilities, license compliance, dependency scanning, static and dynamic application security testing, performance, fuzz testing, among others, GitLab provides you with built-in templates to do all these from within your CI pipeline. All you have to do is include them in your pipeline and voila! 
By leveraging these templates you can more quickly focus on creating and innovating.\n\n![build and test pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/build-and-test-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Review Apps\n\nWouldn’t it be great if you could effortlessly enable all stakeholders to review the application changes BEFORE they are merged to the main branch? Instead of orchestrating and putting together a review environment and building, loading and executing the application to it for every update, you can leverage GitLab Review Apps capability, which streamlines the review process by automatically creating (and cleaning up) temporary review environments with every change. This let's developers focus on innovation instead of environment setup.\n\n![review pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/review-pipeline.png){: .shadow.medium.center.wrap-text}\n\n### Deep Kubernetes integration\n\nAnother way that GitLab uniquely enables you to innovate more rapidly is the deep integration to Kubernetes clusters, which not only includes the automatic creation of and deployment to K8s clusters, but also includes automatic cluster monitoring, per application metrics, and the one-click deployment and management of a variety of supplemental applications such as a Web Application Firewall, Cert-Manager, Prometheus, GitLab Runner, Crossplane, JupyterHub, Elastic Stack, Fluentd, Knative, and GitLab Container Network Policies.\n\nKubernetes clusters can be set up by developers at their project level or by admins at the group levels, enabling developers to take advantage of container-based development best practices without needing deep subject matter expertise. 
This allows developers to spend more of their time working on what matters: creating great product.\n\n![K8s apps](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/K8s-apps.png){: .shadow.medium.center.wrap-text}\n\n### Automatic environments management\n\nGitLab will automatically spin up and tear down environments as needed by the CI/CD pipeline. For example, GitLab automatically spins up pods for the review, staging and production environments. All this infrastructure automation removes the burden of having to manage infrastructure off of your shoulders so that you can spend more time developing and creating code faster.\n\n![environments](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/environments.png){: .shadow.medium.center.wrap-text}\n\n### Pipeline template creation\n\nOnce you create a pipeline based on the best practices for your organization, you can turn it into a pipeline template that your development teams can use. Other developers can reuse this new template in their projects so that they can get right to creating and innovating differentiating features and applications that matter to their consumers.\n\n![steps to create pipeline template](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/steps-create-pipeline-template.png){: .shadow.medium.center.wrap-text}\n\n### Auto DevOps\n\nIf you’d like to leverage a complete DevOps predefined CI/CD pipeline, which is based on best practices, why not use Auto DevOps? Auto DevOps allows you to automatically detect, build, test, deploy, and monitor your applications. Leveraging CI/CD best practices and tools, Auto DevOps aims to simplify the setup and execution of a mature and modern software development lifecycle. The Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. 
The pipeline then deploys the application to staging for verification and then to production in an incremental fashion. As you can see, Auto DevOps saves you from implementing your own pipeline so that you can spend more time innovating.\n\n![partial auto devops pipeline](https://about.gitlab.com/images/blogimages/enables-rapid-innovation/partial-Auto-DevOps-pipeline.png){: .shadow.medium.center.wrap-text}\n\nThese are some of the ways GitLab uniquely enables you to innovate more rapidly by ensuring that everything is where you need it when you need it, empowering you to focus on creating and developing innovations, delivering solutions faster, putting new products and services more quickly in the hands of your customers and remaining competitive. And all within a single application.\n\nFor more videos and demos visit [Learn@GitLab](https://about.gitlab.com/learn/).\nTo learn more about how GitLab can help you innovate more rapidly visit [the GitLab website](https://about.gitlab.com)\n\nCover image by [Florian Bernhardt](https://unsplash.com/@floww?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/rapids?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,721,896],{"slug":2216,"featured":6,"template":700},"enables-rapid-innovation","content:en-us:blog:enables-rapid-innovation.yml","Enables Rapid Innovation","en-us/blog/enables-rapid-innovation.yml","en-us/blog/enables-rapid-innovation",{"_path":2222,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2223,"content":2229,"config":2236,"_id":2238,"_type":14,"title":2239,"_source":16,"_file":2240,"_stem":2241,"_extension":19},"/en-us/blog/ensure-auto-devops-work-after-helm-stable-repo",{"title":2224,"description":2225,"ogTitle":2224,"ogDescription":2225,"noIndex":6,"ogImage":2226,"ogUrl":2227,"ogSiteName":685,"ogType":686,"canonicalUrls":2227,"schema":2228},"Adapting Auto DevOps & managed apps to Helm repo changes","The 
Helm stable repository will be removed this month. We explain how to keep Auto DevOps and GitLab Managed Apps working.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667182/Blog/Hero%20Images/maximilian-weisbecker-Esq0ovRY-Zs-unsplash.jpg","https://about.gitlab.com/blog/ensure-auto-devops-work-after-helm-stable-repo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep GitLab Auto DevOps and Managed Apps working after Helm stable repo is removed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thong Kuah\"}],\n        \"datePublished\": \"2020-11-09\",\n      }",{"title":2230,"description":2225,"authors":2231,"heroImage":2226,"date":2233,"body":2234,"category":1062,"tags":2235},"How to keep GitLab Auto DevOps and Managed Apps working after Helm stable repo is removed",[2232],"Thong Kuah","2020-11-09","The Helm project announced that the Helm Stable repository will be\n[removed](https://www.cncf.io/blog/important-reminder-for-all-helm-users-stable-incubator-repos-are-deprecated-and-all-images-are-changing-location/)\non November 13. This change impacts GitLab [Auto\nDevOps](https://docs.gitlab.com/ee/topics/autodevops/index.html) and [GitLab\nManaged Apps](https://docs.gitlab.com/ee/update/removals.html).\n\n\n## How Auto DevOps is impacted\n\n\nRemoving the Helm stable repository affects Auto Deploy and Auto Review Apps\nstages of Auto DevOps. The deploy jobs from these stages will fail because\nthey cannot fetch the Helm stable repository. GitLab has mitigated this in\nGitLab 13.6 by switching to a [Helm Stable Archive\nrepository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive)\nmaintained by GitLab.\n\n\nIn case Auto DevOps pipelines are failing because of this problem, you can:\n\n\n1. Upgrade to GitLab 13.6.0 when it is released, or\n\n1. If you are on GitLab 13.5.X, you can also upgrade to GitLab 13.5.3\n\n1. 
If you are on GitLab 13.4.X, you can also upgrade to GitLab 13.4.6\n\n1. Specify a newer version of the `auto-deploy-image` image, which contains\nthe fix, in your `.gitlab-ci.yml` file:\n\n    ```\n    include:\n      - template: Auto-DevOps.gitlab-ci.yml\n\n    .auto-deploy:\n      image: \"registry.gitlab.com/gitlab-org/cluster-integration/auto-deploy-image:v1.0.7\"\n    ```\n\nNot all users will be affected by the change. Users who are not using Helm\nas part of Auto DevOps, for example, those that are not using Kubernetes\n(Auto Deploy to AWS targets) will not be impacted by the removal of the Helm\nstable respository.\n\n\n## How GitLab managed apps are impacted\n\n\nThe removal of the Helm stable repository affects installation of the\nIngress, Fluentd, Prometheus, and Sentry apps. These apps will fail to\ninstall as the Helm stable repository is removed. For the following:\n\n\n[GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html):\nGitLab has mitigated this problem in [GitLab\n13.5.0](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/44875) by\nswitching to a [Helm Stable Archive\nrepository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive)\nmaintained by GitLab.\n\n\nThere are a few ways to fix app installation failures because of the Helm\nstable repository was removed.\n\n\n1. Upgrade to GitLab 13.5.0 or later, or\n\n1. If you are on GitLab 13.4.X, you can also upgrade to GitLab 13.4.6.\n\n1. 
If you are on GitLab 13.3.X, you can also upgrade to GitLab 13.3.8.\n\n\nGitLab has mitigated the problem in [GitLab\n13.6](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45487) for users\nwith [GitLab Managed Apps using\nCI/CD](https://docs.gitlab.com/ee/update/removals.html) by switching to a\n[Helm Stable Archive\nrepository](https://gitlab.com/gitlab-org/cluster-integration/helm-stable-archive)\nmaintained by GitLab.\n\n\nIn case GitLab Managed Apps CI/CD installation pipelines are failing because\nof this problem, you can:\n\n\n1. Upgrade to GitLab 13.6.0 when it is released, or\n\n1. Specify a newer version of the `cluster-applications` image, which\ncontains the fix, in your `.gitlab-ci.yml` file:\n\n    ```\n    include:\n      - template: Managed-Cluster-Applications.gitlab-ci.yml\n\n    apply:\n      image: \"registry.gitlab.com/gitlab-org/cluster-integration/cluster-applications:v0.34.1\"\n    ```\n\nIf you are installing applications that were not hosted in the Helm stable\nrepository such as GitLab Runner, these applications will not be affected.\n\n\n## Learn more about the project\n\n\n- [Epic for Helm chart\ndeprecation](https://gitlab.com/groups/gitlab-org/-/epics/4695)\n\n- [Information on error\nalert](https://docs.gitlab.com/ee/topics/autodevops/#error-error-initializing-looks-like-httpskubernetes-chartsstoragegoogleapiscom-is-not-a-valid-chart-repository-or-cannot-be-reached)\n\n- [Information on Helm chart change from\nCNCF](https://www.cncf.io/blog/important-reminder-for-all-helm-users-stable-incubator-repos-are-deprecated-and-all-images-are-changing-location/)\n\n\nCover image by Maximilian Weisbecker on [Unsplash](https://unsplash.com/)\n\n{: .note}\n",[721,9,1062],{"slug":2237,"featured":6,"template":700},"ensure-auto-devops-work-after-helm-stable-repo","content:en-us:blog:ensure-auto-devops-work-after-helm-stable-repo.yml","Ensure Auto Devops Work After Helm Stable 
Repo","en-us/blog/ensure-auto-devops-work-after-helm-stable-repo.yml","en-us/blog/ensure-auto-devops-work-after-helm-stable-repo",{"_path":2243,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2244,"content":2250,"config":2255,"_id":2257,"_type":14,"title":2258,"_source":16,"_file":2259,"_stem":2260,"_extension":19},"/en-us/blog/faq-gitlab-ci-cd-catalog",{"title":2245,"description":2246,"ogTitle":2245,"ogDescription":2246,"noIndex":6,"ogImage":2247,"ogUrl":2248,"ogSiteName":685,"ogType":686,"canonicalUrls":2248,"schema":2249},"FAQ: GitLab CI/CD Catalog","Unlock the full potential of the CI/CD Catalog with expert tips and answers to common questions, including how to create and share components.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098783/Blog/Hero%20Images/Blog/Hero%20Images/cicdcover_5vLe737i4QfvAqv6PnqUaR_1750098782745.png","https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"FAQ: GitLab CI/CD Catalog\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2024-08-01\",\n      }",{"title":2245,"description":2246,"authors":2251,"heroImage":2247,"date":2252,"body":2253,"category":718,"tags":2254},[1835,1567],"2024-08-01","The [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), part of the DevSecOps platform, allows users to discover, reuse, and contribute [CI/CD](https://about.gitlab.com/topics/ci-cd/) components to make software development more efficient and productive. Recently, we hosted a CI/CD Catalog webinar that surfaced a host of helpful questions. 
This FAQ features some of those questions (and answers) and highlights the CI/CD Catalog's capabilities as well as best practices for using it in your environment.\n\n***When will the CI catalog components and inputs be available on Gitlab.com?***\n\nThe [CI catalog components and inputs became generally available](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) starting GitLab 17.0 (in GitLab.com and self-managed).  \n\n***What about versioning components? Often a pipeline is coupled with the code, and we want a way to re-run a release pipeline from an older version of the code. Do we have options for version components similarly to how we do the application?***\n\nWe have full support for version control – at any given time you can use any earlier version.\n\n***Can we have composite components that use multiple other components?***\n\nAbsolutely! Here is an example of a deploy component that uses a validate component.\n\n![example of a deploy component that uses validate component](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098788/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098788135.png)\n\n***What are the options for testing components?*** \n\nThere are several methods of testing components. The first method is mentioned in [the documentation](https://docs.gitlab.com/ee/ci/components/examples.html#test-a-component): Including a component using `$CI_COMMIT_SHA` (instead of version), you can test your component for every single commit. Another strategy is to use [child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines), which allows you to test a component with different inputs parameters. 
More details can be found in the [GitLab forum](https://forum.gitlab.com/t/ci-cd-component-testing-strategies/102983/2?u=leetickett-gitlab).\n\n***Can the component reference URL use a branch name as the version, similar to how docs show a tag (e.g., $CI_SERVER_FQDN/my-org/security-components/secret-detection@master)?*** \n\nYes, you can use a branch name. [The CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#component-versions) lists components versions.  \n\n***How can you show the catalog in self-managed instances?***\n\nA self-managed catalog will be available, but will be empty without any published components. You can use this catalog internally in your organization and it is up for you and your teams to populate it with the appropriate components. Alternatively, you can mirror existing components projects from Gitlab.com to your self-managed   instance.\n\n***Can we clone the public repo into a self-hosted instance?*** \n\nA component is hosted in a GitLab project and like any other project it can be cloned locally. Follow these instructions on [how to mirror a component from GitLab.com to self-managed instance](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance).\n\n***How can you prevent name collisions with CI/CD component jobs?***\n\nUse inputs to specify dynamic job names, which will allow you to [include the same component multiple times in the same pipeline](https://docs.gitlab.com/ee/ci/yaml/inputs.html#include-the-same-file-multiple-times).\n\n***Is it possible to inspect the source code of components in the catalog?***\n\nYes, to view the source code, from the catalog open a component you would like to view. 
Then, click the component name – this will open the project where the component is hosted and you can find the component’s .yml file in the component's templates folder.\n\n***Can a component receive an array of data as input parameter?***\n\n[A component can receive multiple data types](https://docs.gitlab.com/ee/ci/yaml/?query=inputs#specinputstype) such as string, boolean, number, and array.\n\n***Can the component reference more files alongside the .yml file?***\n\nNo, it can’t. This capability is available in [CI Steps](https://docs.gitlab.com/ee/ci/steps/) (which is experimental).\n\n***Can we have anti-patterns for CI/CD components?***\n\nPlease [follow the best practice section in the documentation](https://docs.gitlab.com/ee/ci/components/#write-a-component).\n\n***Is it possible to limit a group to only using components owned by the group (i.e., not allowing community components)?***\n\nNot yet, but [this feature is on our roadmap](https://gitlab.com/gitlab-org/gitlab/-/issues/441102).\n\n***Is the GitLab CI Steps feature related to this component in any way?***\n\nYes, it is, we consider CI Steps as another type of component. 
More details can be found in [the CI Steps documentation](https://docs.gitlab.com/ee/ci/steps).\n\n***Is it possible to make private components for your organization only?***\n\nYes, the [component's visibility](https://docs.gitlab.com/ee/ci/components/#view-the-cicd-catalog) is based on the visibility level of your project and only members that have the privileges to see the project can view and search the component in the catalog.\n\n***What is the best approach if I need to fork a Gitlab.com component in terms of GitLab flow to manage the forked repo and propose changes when needed to the original repo?***\n\nYou can manage your fork similarly to how you manage any Git repository – by making changes in your fork and then creating merge requests to propose changes back to the original repository.\n\n***Is there any difference in source code standardization between a verified creator and a non-verified creator in the catalog? Do verified creators have to follow a higher standard?***\n\nCurrently, there is no process to verify and approve individual creators from our extended community. However. 
we do have a [process for GitLab partners and GitLab-maintained components](https://docs.gitlab.com/ee/ci/components/#verified-component-creators).\n\n***How would you recommend implementing tools like Fortify SCA into your CI/CD pipeline?***\n\nTwo options would be possible: Either Fortify would need to create a shared component in the catalog that exposes the necessary elements for public consumption, or, if publicly-available APIs exist, the community can build an open-source component to be shared and used by others in the catalog.\n\n***What sort of patterns do you recommend for providing \"outputs\" from components that are consumed by other jobs/components in the including pipeline?***\n\nThere is no ability to specify outputs for components, but this is on the roadmap with a new capability called [CI Steps](https://docs.gitlab.com/ee/ci/steps/).\n\n***Is there any plan to label components?***\n\nYes! in this [GitLab epic](https://gitlab.com/groups/gitlab-org/-/epics/11917), we have several issues to enhance searching and discoverability by content type, tags, and category.\n\n***Will existing CI/CD templates be migrated to components?***\n\nYes, the GitLab templates are migrated and have a special badge in the CI/CD Catalog.\n\n***What's the recommended way to transition from our existing GitLab pipeline templates to GitLab catalog components?***\n\nThis should be rather simple since components are very similar to templates. 
We would recommend start using inputs in your templates, and later on moving them to the appropriate folder structure.\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n>\n> - [Introducing CI/CD components and how to use them in GitLab](https://about.gitlab.com/blog/introducing-ci-components/)\n>\n",[9,696],{"slug":2256,"featured":91,"template":700},"faq-gitlab-ci-cd-catalog","content:en-us:blog:faq-gitlab-ci-cd-catalog.yml","Faq Gitlab Ci Cd Catalog","en-us/blog/faq-gitlab-ci-cd-catalog.yml","en-us/blog/faq-gitlab-ci-cd-catalog",{"_path":2262,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2263,"content":2269,"config":2274,"_id":2276,"_type":14,"title":2277,"_source":16,"_file":2278,"_stem":2279,"_extension":19},"/en-us/blog/feature-flags-continuous-delivery",{"title":2264,"description":2265,"ogTitle":2264,"ogDescription":2265,"noIndex":6,"ogImage":2266,"ogUrl":2267,"ogSiteName":685,"ogType":686,"canonicalUrls":2267,"schema":2268},"Learn more about Feature Flags: The next step in Progressive Delivery","How Feature Flags are continuing the next evolution of continuous delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670020/Blog/Hero%20Images/feature-flags.jpg","https://about.gitlab.com/blog/feature-flags-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn more about Feature Flags: The next step in Progressive Delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-08-06\",\n     
 }",{"title":2264,"description":2265,"authors":2270,"heroImage":2266,"date":2271,"body":2272,"category":718,"tags":2273},[715],"2019-08-06","\n\n[DevOps](/topics/devops/) is always evolving. Continuous delivery made a major impact on the way software is deployed, but we don’t think the innovation stops there. As we move into more of a [multi-cloud](/topics/multicloud/), hybrid development world, continuous delivery has continued to change into something more “progressive.”\n\n[Progressive Delivery](https://redmonk.com/jgovernor/2018/08/06/towards-progressive-delivery/) isn’t exactly the new idea that continuous delivery was; it’s simply a continuation of it. What Progressive Delivery does is give more precision to the delivery process through new ideas and best practices, reducing the risks of one big, risky deployment. At GitLab, we think Progressive Delivery is the next logical evolution of DevOps beyond CI/CD and will become the default way to release software in the future.\n\nWe previously discussed [how Review Apps can enable Progressive Delivery](/blog/progressive-delivery-using-review-apps/), and today we’ll discuss the targeted rollout process of Feature Flags.\n\n## What are Feature Flags?\n\n[Feature Flags](/direction/release/feature_flags/) (also known as feature toggles, feature flippers, or feature switches) give developers the ability to roll out features selectively without changing the source code. Incomplete features can be merged into the production code but flagged on or off, which allows many small, incremental versions of software to be delivered without the cost of constant branching and merging.\n\nFeature Flags are designed to minimize the blast radius of releasing new features. By utilizing Feature Flags, developers can release to a subset of users and roll back easily through toggling, leaving the live code intact. A feature can also be tested before it’s completed and ready for release. 
This technique allows developers to release a version of a product that has unfinished features which are hidden (toggled) so they do not appear in the user interface.\n\n[Martin Fowler organizes Feature Flags into four different categories](https://martinfowler.com/articles/feature-toggles.html) based on how long they’re typically in place and how dynamic they should be:\n\n*   **Release toggles**: A temporary flag which allows incomplete, latent code to be shipped to production and turned on or off, or perhaps never enabled at all.\n*   **Experiment toggles**: A short-lived toggle usually used for multivariate A/B testing, kept in place only long enough to gather results.\n*   **Ops toggles**: For releases that have unclear performance implications, this toggle allows system administrators to roll back quickly, but it’s not unheard of for long-term toggles to remain in place as a kill switch.\n*   **Permission toggles**: Manages features for specific users, such as “premium” features, alpha or beta features, or even internal features. These toggles can be very long-lived.\n\nFeature Flags can be a quick way to do [version control](/topics/version-control/) so that [continuous delivery](/topics/continuous-delivery/) remains continuous. Their ability to turn off or on with simple commands makes Feature Flags a low-risk option for introducing new features. While they’re easy to use, they can have some drawbacks if not implemented properly.\n\n## Working with Feature Flags\n\nSome worry about the added complexity with Feature Flags, since code may need to be tested with toggles on and off, essentially doubling the load. While it’s not necessary to test every toggle configuration, a best practice is for developers to test code that has the greatest likelihood of going live in production. 
According to Martin Fowler, a good convention is to enable existing or legacy behavior when a Feature Flag is Off, and new or future behavior when it's On.\n\nAnother risk of using Feature Flags is stale flags, a situation when flags are left in the code and forgotten about. As teams add more and more flags into their code, it can become harder to keep track of and verify the flags.\n\nToday, organizations rely on feature management systems such as [Launch Darkly](https://launchdarkly.com/) or [Optimizely](https://blog.optimizely.com/2017/10/18/feature-management/) in order to use Feature Flags. As with any link in a toolchain, this adds an additional level of oversight that can be hard to manage and maintain. Analysts recognize that feature-toggling capabilities are becoming more of what's fundamentally needed for a continuous delivery platform. While we are still in the early stages of Feature Flags, we do have some alpha Feature Flag capabilities already built into GitLab you can try out today, and we will be launching additional functionality in 12.2:\n\n*   [Feature Flags enabled for specific users](https://gitlab.com/gitlab-org/gitlab-ee/issues/11459)\n*   [Percent rollout per environment](https://gitlab.com/gitlab-org/gitlab-ee/issues/8240)\n\n## GitLab and Progressive Delivery\n\nAs we continue to iterate on our [product vision for CI/CD](/direction/ops/#progressive-delivery), we’re adopting a Progressive Delivery mindset for how we implement new features into GitLab. As a complete [DevOps platform](/solutions/devops-platform/), delivered as a [single application](/topics/single-application/), it’s important for us to offer a comprehensive solution that offers the latest best practices. 
Review Apps, Canary Deployments, and Feature Flags are just some of the ways we’re bringing Progressive Delivery to the GitLab community.\n\nTo learn more about how we’re using Feature Flags and Feature flag best practicies in GitLab, watch this deep dive with our Director of Product Management, [Jason Yavorska](/company/team/#jyavorska).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TSSqNUhbbmQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nFeature Flags can be a useful way to validate and measure performance before rolling out a feature to a broader audience. High visibility makes DevOps more efficient, and integrating Feature Flags into the same application where your code repositories, CI/CD, project planning, and monitoring occurs can overcome many of the challenges associated with Feature Flags.\n\nLearn how GitLab’s built-in CI/CD helps teams implement Progressive Delivery tools such as [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/), [Feature Flags](/direction/release/feature_flags/), and [Canary Deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html), without the complicated integrations and plugin maintenance.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Chris Lawton](https://unsplash.com/@chrislawton?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/flags?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,695],{"slug":2275,"featured":6,"template":700},"feature-flags-continuous-delivery","content:en-us:blog:feature-flags-continuous-delivery.yml","Feature Flags Continuous 
Delivery","en-us/blog/feature-flags-continuous-delivery.yml","en-us/blog/feature-flags-continuous-delivery",{"_path":2281,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2282,"content":2288,"config":2293,"_id":2295,"_type":14,"title":2296,"_source":16,"_file":2297,"_stem":2298,"_extension":19},"/en-us/blog/fluentd-using-gitlab-ci-cd",{"title":2283,"description":2284,"ogTitle":2283,"ogDescription":2284,"noIndex":6,"ogImage":2285,"ogUrl":2286,"ogSiteName":685,"ogType":686,"canonicalUrls":2286,"schema":2287},"Thanks Fluentd for betting on GitLab CI/CD!","We're happy to support fresh CNCF graduate Fluentd with GitLab CI/CD, and excited about their latest innovation offering stream processing on the edge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678614/Blog/Hero%20Images/gitlab-fluentd.png","https://about.gitlab.com/blog/fluentd-using-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks Fluentd for betting on GitLab CI/CD!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":2283,"description":2284,"authors":2289,"heroImage":2285,"date":2290,"body":2291,"category":783,"tags":2292},[2015],"2019-05-21","\nFluentd, the [latest project to graduate](https://www.fluentd.org/blog/fluentd-cncf-graduation) in the CNCF, announced on stage at KubeCon Barcelona today that it is using [GitLab CI/CD](/solutions/continuous-integration/) for continuous integration. We are thrilled about the shout out and honored to support such an influential and innovative project.\n\nFor those who haven’t yet worked with Fluentd, it is an [open source data collector](https://www.fluentd.org/architecture), which lets you unify the data collection and consumption for a better use and understanding of data. 
Fluent Bit is their lighter-weight forwarder for those with exacting memory requirements. The project sports 7,868 stars on GitHub and their community has contributed more than 900 contributed plugins. They witness more than 100K downloads a day!\n\nThe latest innovation from Fluentd around [stream processing on the edge](https://docs.fluentbit.io/stream-processing/) can be very useful for our industry. As many of those who monitor large-scale, complex, distributed systems, run IoT businesses, or build smart cities will attest, more and more data is generated by these systems and analysis often needs to happen blazingly fast to be meaningful. The standard data analysis model, where it is first stored and indexed in a database (presumably in some cloud) and then analyzed, is not good enough for some real-time and complex analysis needs. The latencies associated with such data transfer may not be able to support applications involving time-critical, data-driven decision making. With Fluent bit, the Fluent team is looking to process the data while it's still in motion in the Log processor – bringing a lot of advantages of speed.\n\nWhile I am reading papers by others attempting to build stream processing on the edge, I find Fluentd’s efforts exciting because they already have major community traction and are part of companies’ observability workflows for logging. The [CNCF graduation criteria](https://github.com/cncf/toc/blob/master/process/graduation_criteria.adoc) that Fluentd met will further embolden enterprises to try it out, as part of the requirements are a diverse contributor community and security audits.\n\nWe've spent the past few months collaborating with Fluentd on their CI needs, and it's been very educational for us. We learned about the unique challenges that fast-moving projects in the CNCF face, and how we can be of assistance with our CI/CD offering. 
A large part of the answer is providing clear and consistent guidance around converting pipelines and then supporting the projects to success. If you are a CNCF project interested in working with GitLab CI/CD, holler at us and we’d be delighted to help.\n\nUntil then, enjoy KubeCon Barca!\n",[9,827,896,830,278,1228],{"slug":2294,"featured":6,"template":700},"fluentd-using-gitlab-ci-cd","content:en-us:blog:fluentd-using-gitlab-ci-cd.yml","Fluentd Using Gitlab Ci Cd","en-us/blog/fluentd-using-gitlab-ci-cd.yml","en-us/blog/fluentd-using-gitlab-ci-cd",{"_path":2300,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2301,"content":2306,"config":2313,"_id":2315,"_type":14,"title":2316,"_source":16,"_file":2317,"_stem":2318,"_extension":19},"/en-us/blog/forrester-tei",{"title":2302,"description":2303,"ogTitle":2302,"ogDescription":2303,"noIndex":6,"ogImage":1200,"ogUrl":2304,"ogSiteName":685,"ogType":686,"canonicalUrls":2304,"schema":2305},"Estimate your GitLab ROI with Forrester's economic study","Now available: A new Forrester ROI study and calculator based on real value customers got from using GitLab for SCM, CI, and CD.","https://about.gitlab.com/blog/forrester-tei","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Colin Fletcher\"}],\n        \"datePublished\": \"2020-07-29\",\n      }",{"title":2307,"description":2303,"authors":2308,"heroImage":1200,"date":2134,"body":2310,"category":1062,"tags":2311},"Discover your GitLab return on investment with the Forrester Total Economic Impact™ Study and Estimator",[2309],"Colin Fletcher","\n\nWe consistently hear from the global GitLab family (our community, customers, and really anybody interested in GitLab) that they know from experience that GitLab helps them do the work they 
want to do, faster and better, and that it’s a valuable, even vital, part of their success. But they often have a difficult time describing the value GitLab delivers, especially in specific, quantified ways. We also regularly hear that the hardest part about quantifying \"value\" is knowing where and how to start. \n\n**Enter the Forrester Total Economic Impact™ (TEI) of GitLab: studying real customer experiences**\n \nSo to help everyone better understand the value proposition, GitLab commissioned Forrester Consulting to conduct a [Total Economic Impact™ (TEI) study](/resources/report-forrester-tei/) examining the potential return on investment (ROI) organizations may realize by using GitLab for version control & collaboration (VC&C)/SCM, [continuous integration (CI), and continuous delivery (CD)](/topics/ci-cd/) - all use cases that represent where many teams begin or expand their use of GitLab.  \n\nTo start, GitLab customers were independently interviewed by Forrester Consulting. The interview experiences and any other data collected was then used to create multiple models which in turn generated quantified results based on the combined experiences of all of the customers studied. The data collected, resulting models, and study itself were then reviewed independently by Forrester Research analysts. GitLab stakeholders were also interviewed as part of the data gathering and review process.  \n\n**Significant results and useful tools to discover your ROI**\n\nJust a sampling of the results realized by the composite organization over an analysis period of three years, based on GitLab customer experiences, yielded these potential, quantifiable benefits in the form of:  \n\n- An overall 407% return on investment (ROI) \n- Improved development and delivery efficiency \n  - Ex. 87% improved development and delivery efficiency (reduced time), resulting in over $23 million in savings \n- Revenue from increased number of releases \n  - Ex. 
12x increase in the number of revenue generating application releases in a year, resulting in $12.3 million in additional revenue \n- Improved Code Quality \n  - Ex. 80% reduction in code defects, resulting in over $16.8 million in savings \n- Savings from reducing the number of tools in use \n  - Ex. $3.7 million in savings from using four fewer tools (with their associated costs) each year  \n\nNow these results, while impressive, are based on the experiences of the GitLab customers studied and as with all models, your own unique experience will vary. As such we encourage you to spend time looking over [the study](/resources/report-forrester-tei/) to better understand where the numbers came from and how they may or may not relate to your situation and what you are working to achieve.  \n\nTo help you take the next step of estimating your own potential results, we are thrilled to make available an [online estimator](https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html) that is based on the TEI study’s models. Enter your own data and you'll get a customized version of the study.  \n\n**Couldn’t have done it without you**\n\nLastly, we want to offer our deepest thanks to the incredibly generous GitLab customers who were willing to share their experiences in this way. They helped all of us in our respective journeys. Thank you! 
\n\n**Get started today!** \n\n- [Download the Forrester Total Economic Impact™ Study commissioned By GitLab, June 2020](/resources/report-forrester-tei/)\n- \u003Ca href=\"https://tools.totaleconomicimpact.com/go/gitlab/devopsplatform/index.html\" target=\"_blank\">Fill out your info in the online estimator and get a custom report based on the TEI study data and models\u003C/a>\n",[9,873,721,1062,2312,763],"research",{"slug":2314,"featured":6,"template":700},"forrester-tei","content:en-us:blog:forrester-tei.yml","Forrester Tei","en-us/blog/forrester-tei.yml","en-us/blog/forrester-tei",{"_path":2320,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2321,"content":2326,"config":2332,"_id":2334,"_type":14,"title":2335,"_source":16,"_file":2336,"_stem":2337,"_extension":19},"/en-us/blog/free-period-for-cicd-external-repositories",{"title":2322,"description":2323,"ogTitle":2322,"ogDescription":2323,"noIndex":6,"ogImage":1200,"ogUrl":2324,"ogSiteName":685,"ogType":686,"canonicalUrls":2324,"schema":2325},"The free period of CI/CD for GitHub is ending soon","The free-of-charge use of CI/CD for GitHub is ending soon, so you'll need to upgrade to continue using this feature.","https://about.gitlab.com/blog/free-period-for-cicd-external-repositories","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The free period of CI/CD for GitHub is ending soon\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Parker Ennis\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":2322,"description":2323,"authors":2327,"heroImage":1200,"date":2329,"body":2330,"category":1062,"tags":2331},[2328],"Parker Ennis","2020-03-12","\n\n[CI/CD for GitHub](/solutions/github/) is a feature that lets you use any Git-based repository as a host in combination with GitLab CI/CD regardless of where your source code lives – 
[GitHub](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html), [Bitbucket](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/bitbucket_integration.html), or any other Git server. To introduce this feature to the large number of users with private repos hosted on GitHub.com, we made it available to users [free of charge](/blog/six-more-months-ci-cd-github/) for a limited time only.\n\nWe then [extended the free period](/blog/ci-cd-github-extended-again/) for an additional limited time. We’ve set the final end date for this free period for March 22, 2020.\n\nIf you wish to continue using [CI/CD for private external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html) past March 22, 2020, you will need to upgrade your plan to at least a [Silver plan](/pricing/).\n\nOf course, you always have the option of migrating your project to [GitLab.com](https://docs.gitlab.com/ee/user/project/import/github.html). As part of our commitment to our value of transparency and open source, all public repositories on GitLab.com get all of the features in our top-tier Gold plan for free. 
If your repo on GitHub.com is public, then it gets mirrored to GitLab.com as a public repo and you have access to CI/CD capabilities.\n\nNote: If you are only using [repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html#pulling-from-a-remote-repository) without CI/CD then you only need a Bronze plan to continue using this functionality.\n\nSo, what exactly does this mean for you?\n\n*  The ability to mirror private external repositories and run CI/CD on them will no longer be available as of March 22, 2020, unless the repositories have been made public or you have upgraded to an eligible GitLab plan.\n*  Since [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) can only be published through GitLab CI, users who were using a GitHub repository with private projects and haven't upgraded to an eligible GitLab plan will be unable to have private pages.\n\nWe've designed this process to be a smooth transition for our users. If you have any additional questions about the change, or how this impacts you and your teams, please don’t hesitate to reach out:\n*  For general questions or pricing inquiries, please contact our **[Sales team](/sales/)**.\n*  For technical questions or concerns, please review our **[Support options](/support/)**.\n\nThanks!\n",[9,721,268,827],{"slug":2333,"featured":6,"template":700},"free-period-for-cicd-external-repositories","content:en-us:blog:free-period-for-cicd-external-repositories.yml","Free Period For Cicd External 
Repositories","en-us/blog/free-period-for-cicd-external-repositories.yml","en-us/blog/free-period-for-cicd-external-repositories",{"_path":2339,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2340,"content":2346,"config":2353,"_id":2355,"_type":14,"title":2356,"_source":16,"_file":2357,"_stem":2358,"_extension":19},"/en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab",{"title":2341,"description":2342,"ogTitle":2341,"ogDescription":2342,"noIndex":6,"ogImage":2343,"ogUrl":2344,"ogSiteName":685,"ogType":686,"canonicalUrls":2344,"schema":2345},"From code to production: A guide to continuous deployment with GitLab","Learn how to get started building a robust continuous deployment pipeline in GitLab. Follow these step-by-step instructions, practical examples, and best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659478/Blog/Hero%20Images/REFERENCE_-_Use_this_page_as_a_reference_for_thumbnail_sizes.png","https://about.gitlab.com/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From code to production: A guide to continuous deployment with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benjamin Skierlak\"},{\"@type\":\"Person\",\"name\":\"James Wormwell\"}],\n        \"datePublished\": \"2025-01-28\",\n      }",{"title":2341,"description":2342,"authors":2347,"heroImage":2343,"date":2350,"body":2351,"category":693,"tags":2352},[2348,2349],"Benjamin Skierlak","James Wormwell","2025-01-28","Continuous deployment is a game-changing practice that enables teams to\ndeliver value faster, with higher confidence. 
However, diving into advanced\ndeployment workflows — such as GitOps, container orchestration with\nKubernetes, or dynamic environments — can be intimidating for teams just\nstarting out.\n\n\nAt GitLab, we're committed to making delivery seamless and scalable. By\nenabling teams to focus on the fundamentals, we empower them to build a\nstrong foundation that supports growth into more complex strategies over\ntime. This guide provides essential steps to begin implementing continuous\ndeployment with GitLab, laying the foundation for your long-term success.\n\n\n## Start with a workflow plan\n\n\nBefore diving into the technical implementation, take time to map out your\ndeployment workflow. Success lies in careful planning and a methodical\napproach.\n\n\n### Artifact management strategy\n\n\nIn the context of continuous deployment, artifacts are the packaged outputs\nof your build process that need to be stored, versioned, and deployed. These\ncould be:\n\n\n- container images for your applications\n\n- packages\n\n- compiled binaries or executables\n\n- libraries\n\n- configuration files\n\n- documentation packages\n\n- other artifacts\n\n\nEach type of artifact plays a specific role in your deployment process. For\nexample, a typical web application might generate:\n\n\n- a container image for the backend service\n\n- a ZIP archive of compiled frontend assets\n\n- SQL files for database changes\n\n- environment-specific configuration files\n\n\nManaging these artifacts effectively is crucial for successful deployments.\nHere's how to approach artifact management.\n\n\n#### Artifacts and releases versioning strategies\n\n\nA best practice to get you started with a clean structure is to establish a\nclear versioning strategy for your artifacts. 
When creating releases:\n\n\n- Use semantic versioning (major.minor.patch) for release tags\n  - Example: `myapp:1.2.3` for a stable release\n  - Major version changes (2.0.0) for breaking changes\n  - Minor version changes (1.3.0) for new features\n  - Patch version changes (1.2.4) for bug fixes\n- Maintain a 'latest' tag for the most recent stable version\n  - Example: `myapp:latest` for automated deployments\n- Include commit SHA for precise version tracking\n  - Example: `myapp:1.2.3-abc123f` for debugging\n- Consider branch-based tags for development environments\n  - Example: `myapp:feature-user-auth` for feature testing\n\n#### Build artifacts retention\n\n\nImplement defined retention rules:\n\n\n- Set explicit expiration timeframes for temporary artifacts\n\n- Define which artifacts need permanent retention\n\n- Configure cleanup policies to manage storage\n\n\n#### Registry access and authentication\n\n\nSecure your artifacts with proper access controls:\n\n\n- Implement Personal Access Tokens for developer access\n\n- Configure CI/CD variables for pipeline authentication\n\n- Set up proper access scopes\n\n\n### Environment strategy\n\n\nConsider your environments early, as they shape your entire deployment\npipeline:\n\n\n- Development, staging, and production environment configurations\n\n- Environment-specific variables and secrets\n\n- Access controls and protection rules\n\n- Deployment tracking and monitoring approach\n\n\n### Deployment targets\n\n\nBe intentional as to where and how you'll deploy, these decisions matter and\nthe benefits and drawbacks of each should be considered:\n\n\n- Infrastructure requirements (VMs, containers, cloud services)\n\n- Network access and security configurations\n\n- Authentication mechanisms (SSH keys, access tokens)\n\n- Resource allocation and scaling considerations\n\n\nWith our strategy defined and foundational decisions made, we can now\ntranslate these plans into a working pipeline. 
We'll build a practical\nexample that demonstrates these concepts, starting with a simple application\nand progressively adding deployment capabilities.\n\n\n## Implementing your CD pipeline\n\n\n### A step-by-step example\n\n\nLet's walk through implementing a basic continuous deployment pipeline for a\nweb application. We'll use a simple HTML application as an example, but\nthese principles apply to any type of application. We’re also going to\ndeploy our application as a Docker image on a simple virtual machine. This\nwill allow us to lean on a curated image with minimum dependencies, and to\nensure no environment specific requirements are unintentionally brought in.\nBy working on a virtual machine, we won’t be leveraging GitLab’s native\nintegrations, allowing us to work on an easier but less scalable setup to\nbegin with.\n\n\n#### Prerequisites\n\n\nIn this example, we’ll aim to containerize an application that we’ll run on\na virtual machine hosted on a cloud provider. We’ll also test this\napplication locally on our machine. 
This list of prerequisites is only\nneeded for this scenario.\n\n\n##### Virtual machine setup\n\n\n- Provision a VM in your preferred cloud provider (e.g., GCP, AWS, Azure)\n\n- Configure network rules to allow access on ports 22, 80, and 443\n\n- Record the machine's public IP address for deployment\n\n\n##### Set up SSH authentication:\n\n\n- Generate a public/private key pair for the machine\n\n- In GitLab, go to **Settings > CI/CD > Variables**\n\n- Create a variable called `GITLAB_KEY`\n\n- Set Type to \"File\" (required for SSH authentication)\n\n- Paste the private key in the Value field\n\n- Define a USER variable, this is the user logging in and running the\nscripts on your VM\n\n\n##### Configure deployment variables\n\n\n- Create variables for your deployment targets:\n  - `STAGING_TARGET`: Your staging server IP/domain\n  - `PRODUCTION_TARGET`: Your production server IP/domain\n\n##### Local development setup\n\n\n- Install Docker on your local machine for testing deployments\n\n\n##### GitLab Container Registry access\n\n\n- Locate your registry path:\n  - Navigate to **Deploy > Container Registry**\n  - Copy the registry path (e.g., registry.gitlab.com/group/project)\n- Set up authentication:\n  - Go to **Settings > Access Tokens**\n  - Create a new token with registry access\n  - Token expiration: Maximum 1 year\n  - Save the token securely\n- Configure local registry access:\n\n\n```\n\ndocker login registry.gitlab.com\n\n# The username if you are using a PAT is gitlab-ci-token\n\n# Password: your-access-token\n\n```\n\n\n#### 1. Create your application\n\n\nStart with a basic web application. For our example, we're using a simple\nHTML page:\n\n\n```\n\n\u003C!-- index.html -->\n\n\u003Chtml>\n  \u003Chead>\n    \u003Cstyle>\n      body {\n        background-color: #171321; /* GitLab dark */\n      }\n    \u003C/style>\n  \u003C/head>\n  \u003Cbody>\n    \u003C!-- Your content here -->\n  \u003C/body>\n\u003C/html>\n\n```\n\n\n#### 2. 
Containerize your application\n\n\nCreate a Dockerfile to package your application:\n\n\n```\n\nFROM nginx:1.26.2\n\nCOPY index.html /usr/share/nginx/html/index.html\n\n```\n\n\nThis Dockerfile:\n\n\n- Uses nginx as a base image for serving web content\n\n- Copies your HTML file to the correct location in the nginx directory\nstructure\n\n\n#### 3. Set up your CI/CD pipeline\n\n\nCreate a `.gitlab-ci.yml` file to define your pipeline stages:\n\n\n```\n\nvariables:\n  TAG_LATEST: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:latest\n  TAG_COMMIT: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA\n\nstages:\n  - publish\n  - deploy\n```\n\n\nLet's break it down:\n\n\n`TAG_LATEST` is made up of three parts:\n\n\n- `$CI_REGISTRY_IMAGE` is the path to your project's container registry in\nGitLab\n\n\nFor example: `registry.gitlab.com/your-group/your-project`\n\n\n- `$CI_COMMIT_REF_NAME` is the name of your branch or tag\n\n\nFor example, if you're on main branch: `/main`, and if you're on a feature\nbranch: `/feature-login`\n\n\n- `:latest` is a fixed suffix\n\n\nSo if you're on the main branch, `TAG_LATEST` becomes:\n`registry.gitlab.com/your-group/your-project/main:latest`.\n\n\n`TAG_COMMIT` is almost identical, but instead of `:latest`, it uses:\n`$CI_COMMIT_SHA` which is the commit identifier, for example:\n`:abc123def456`.\n\n\nSo for that same commit on main branch, `TAG_COMMIT` becomes:`\nregistry.gitlab.com/your-group/your-project/main:abc123def456`.\n\n\nThe reason for having both is `TAG_LATEST` gives you an easy way to always\nget the newest version, and `TAG_COMMIT` gives you a specific version you\ncan return to if needed.\n\n\n#### 4. 
Publish to the container registry\n\n\nAdd the publish job to your pipeline:\n\n\n```\n\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n```\n\n\nThis job:\n\n\n- Uses Docker-in-Docker to build images\n\n- Creates two tagged versions of your image\n\n- Authenticates with the GitLab registry\n\n- Pushes both versions to the registry \n\n\nNow that our images are safely stored in the registry, we can focus on\ndeploying them to our target environments. Let's start with local testing to\nvalidate our setup before moving to production deployments.\n\n\n#### 5. Deploy to your environment\n\n\nBefore deploying to production, you can test locally. We just published our\nimage to the GitLab repository, which we’ll pull locally. If you’re unsure\nof the exact path, navigate to **Deploy > Container Registry**, and you\nshould see an icon to copy the path of your image at the end of the line for\nthe container image you want to test.\n\n\n```\n\ndocker login registry.gitlab.com \n\ndocker run -p 80:80 registry.gitlab.com/your-project-path/main:latest\n\n```\n\n\nBy doing so you should be able to access your application locally on your\nlocalhost address through your web browser.\n\n\nYou can now add a deployment job to your pipeline:\n\n\n```\n\ndeploy:\n  stage: deploy\n  image: alpine:latest\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$TARGET_SERVER \n      docker pull $TAG_COMMIT &&\n      docker rm -f myapp || true &&\n      docker run -d -p 80:80 --name myapp $TAG_COMMIT\n```\n\n\nThis job:\n\n\n- Sets up SSH access to your deployment target\n\n- Pulls the latest image\n\n- 
Removes any existing container\n\n- Deploys the new version\n\n\n#### 6. Track deployments\n\n\nEnable deployment tracking by adding environment configuration:\n\n\n```\n\ndeploy:\n  environment:\n    name: production\n    url: https://your-application-url.com \n```\n\n\nThis creates an environment object in GitLab's **Operate > Environments**\nsection, providing:\n\n\n- Deployment history\n\n- Current deployment status\n\n- Quick access to your application\n\n\nWhile a single environment pipeline is a good starting point, most teams\nneed to manage multiple environments for proper testing and staging. Let's\nexpand our pipeline to handle this more realistic scenario.\n\n\n#### 7. Set up multiple environments\n\n\nFor a more robust pipeline, configure staging and production deployments:\n\n\n```\n\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\nstaging:\n  stage: staging\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  environment:\n    name: staging\n    url: https://staging.your-app.com\n  # deployment script here\n\nproduction:\n  stage: production\n  rules:\n    - if: $CI_COMMIT_TAG\n  environment:\n    name: production\n    url: https://your-app.com\n  # deployment script here\n```\n\n\nThis setup:\n\n\n- Deploys to staging from your main branch\n\n- Uses GitLab tags to trigger production deployments\n\n- Provides separate tracking for each environment\n\n\nHere and in our next step, we’re leveraging a very useful GitLab feature:\ntags. By manually creating a tag in the **Code > Tags** section, the\n`$CI_COMMIT_TAG` gets created, which allows us to trigger jobs accordingly.\n\n\n#### 8. 
Create automated release notes\n\n\nWe'll be using GitLab's release capabilities through our CI/CD pipeline.\nFirst, update your stages in `.gitlab-ci.yml`:\n\n\n```\n\nstages:\n\n\n- publish\n\n- staging\n\n- release # New stage for releases\n\n- version\n\n- production\n\n```\n\n\nNext, add the release job:\n\n\n```\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG                  # Only run when a tag is created\n  script:\n    - echo \"Creating release for $CI_COMMIT_TAG\"\n  release:                                # Release configuration\n    name: 'Release $CI_COMMIT_TAG'\n    description: 'Release created from $CI_COMMIT_TAG'\n    tag_name: '$CI_COMMIT_TAG'           # The tag to create\n    ref: '$CI_COMMIT_TAG'                # The tag to base release on\n```\n\n\nYou can enhance this by adding links to your container images:\n\n\n```\n\nrelease:\n  name: 'Release $CI_COMMIT_TAG'\n  description: 'Release created from $CI_COMMIT_TAG'\n  tag_name: '$CI_COMMIT_TAG'\n  ref: '$CI_COMMIT_TAG'\n  assets:\n    links:\n      - name: 'Container Image'\n        url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n        link_type: 'image'\n```\n\n\nFor meaningful automated release notes:\n\n\n- Use conventional commits (feat:, fix:, etc.)\n\n- Include issue numbers (#123)\n\n- Separate subject from body with blank line\n\n\nIf you want custom release notes with deployment info:\n\n\n```\n\nrelease_job:\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n      - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n      ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    
description: './release_notes.md'\n```\n\n\nOnce configured, releases will be created automatically when you create a\nGit tag. You can view them in GitLab under **Deploy > Releases**.\n\n\n#### 9. Put it all together\n\n\nThis is what our final YAML file looks like:\n\n\n```\n\nvariables:\n  TAG_LATEST: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:latest\n  TAG_COMMIT: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA\n  STAGING_TARGET: $STAGING_TARGET    # Set in CI/CD Variables\n  PRODUCTION_TARGET: $PRODUCTION_TARGET  # Set in CI/CD Variables\n\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\n# Build and publish to registry\n\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n\n# Deploy to staging\n\nstaging:\n  stage: staging\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$STAGING_TARGET \"\n        docker pull $TAG_COMMIT &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $TAG_COMMIT\"\n  environment:\n    name: staging\n    url: http://$STAGING_TARGET\n\n# Create release\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n 
     - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n      ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: './release_notes.md'\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_TAG'\n    assets:\n      links:\n        - name: 'Container Image'\n          url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n          link_type: 'image'\n\n# Version the image with release tag\n\nversion_job:\n  stage: version\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - docker pull $TAG_COMMIT\n    - docker tag $TAG_COMMIT $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n\n# Deploy to production\n\nproduction:\n  stage: production\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$PRODUCTION_TARGET \"\n        docker pull $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\"\n  environment:\n    name: production\n    url: http://$PRODUCTION_TARGET\n```\n\n\nThis complete pipeline:\n\n\n- Publishes images to the registry (main branch)\n\n- Deploys to staging (main branch)\n\n- Creates releases (on tags)\n\n- Versions images with release tags\n\n- Deploys to production (on tags)\n\n\nKey benefits:\n\n\n- Clean reproducible, local development and testing environment\n\n- Clear path to production environments with structure to build confidence\nin what is deployed\n\n- Pattern to 
recover from unexpected failures, etc.\n\n- Ready to scale/adopt more complex deployment strategies\n\n\n### Best practices\n\n\nThroughout implementation, maintain these principles:\n\n\n- Document everything, from variable usage to deployment procedures\n\n- Use GitLab's built-in features (environments, releases, registry)\n\n- Implement proper access controls and security measures\n\n- Plan for failure with robust rollback procedures\n\n- Keep your pipeline configurations DRY (Don't Repeat Yourself)\n\n\n## Scale your deployment strategy\n\n\nWhat next? Here are some aspects to consider as your continuous deployment\nstrategy matures.\n\n\n### Advanced security measures\n\n\nEnhance security through:\n\n\n- Protected environments with restricted access\n\n- Required approvals for production deployments\n\n- Integrated security scanning\n\n- Automated vulnerability assessments\n\n- Branch protection rules for deployment-related changes\n\n\n### Progressive delivery strategies\n\n\nImplement advanced deployment strategies:\n\n\n- Feature flags for controlled rollouts\n\n- Canary deployments for risk mitigation\n\n- Blue-green deployment strategies\n\n- A/B testing capabilities\n\n- Dynamic environment management\n\n\n### Monitoring and optimization\n\n\nEstablish robust monitoring practices:\n\n\n- Track deployment metrics\n\n- Set up performance monitoring\n\n- Configure deployment alerts\n\n- Establish deployment SLOs\n\n- Regular pipeline optimization\n\n\n## Why GitLab?\n\n\nGitLab's continuous deployment capabilities make it a standout choice for\nmodern deployment workflows. The platform excels in streamlining the path\nfrom code to production, offering built-in container registry, environment\nmanagement, and deployment tracking all within a single interface. 
GitLab's\nenvironment-specific variables, deployment approval gates, and rollback\ncapabilities provide the security and control needed for production\ndeployments, while features like review apps and feature flags enable\nprogressive delivery approaches. As part of GitLab's complete DevSecOps\nplatform, these CD capabilities seamlessly integrate with your entire\nsoftware lifecycle.\n\n\n## Get started today\n\n\nThe journey to continuous deployment is an evolution, not a revolution.\nStart with the fundamentals, build a solid foundation, and gradually\nincorporate advanced features as your team's needs grow. GitLab provides the\ntools and flexibility to support you at every stage of this journey, from\nyour first automated deployment to complex, multi-environment delivery\npipelines.\n\n\n> Sign up for a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/devsecops/) to get started\nwith continuous deployment today.\n",[786,9,695,693,917],{"slug":2354,"featured":6,"template":700},"from-code-to-production-a-guide-to-continuous-deployment-with-gitlab","content:en-us:blog:from-code-to-production-a-guide-to-continuous-deployment-with-gitlab.yml","From Code To Production A Guide To Continuous Deployment With Gitlab","en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab.yml","en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab",{"_path":2360,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2361,"content":2367,"config":2373,"_id":2375,"_type":14,"title":2376,"_source":16,"_file":2377,"_stem":2378,"_extension":19},"/en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"title":2362,"description":2363,"ogTitle":2362,"ogDescription":2363,"noIndex":6,"ogImage":2364,"ogUrl":2365,"ogSiteName":685,"ogType":686,"canonicalUrls":2365,"schema":2366},"From idea to production on thousands of clouds","Deliver cloud native applications in more places consistently at scale with GitLab and 
Gravity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679266/Blog/Hero%20Images/blue-lights.jpg","https://about.gitlab.com/blog/from-idea-to-production-on-thousands-of-clouds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From idea to production on thousands of clouds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ev Kontsevoy\"}],\n        \"datePublished\": \"2019-11-20\",\n      }",{"title":2362,"description":2363,"authors":2368,"heroImage":2364,"date":2370,"body":2371,"category":300,"tags":2372},[2369],"Ev Kontsevoy","2019-11-20","\nToday, deploying an application with GitLab is easier than ever: just create a Kubernetes cluster on your cloud of choice, connect it to GitLab with the Kubernetes integration, and Auto DevOps creates a full deployment pipeline for you.\n\nBut what if you need your app to run in two clusters in two separate regions? Ten clusters across multiple cloud providers? A hundred clusters and also on a fleet of self-driving trucks?\n\nAt [Gravitational](https://gravitational.com), we believe the future should not belong to a single cloud provider and developers should be able to run their applications anywhere with the same simplicity as having a single Kubernetes cluster.\n\nI am a huge fan of GitLab. I’ve had the great pleasure of getting to know much of the founding team [over the years](https://about.gitlab.com/blog/gitlab-joins-forces-with-gravitational/) and was happy to provide my [own contribution](https://gitlab.com/gitlab-org/gitlab-foss/issues/22864) to the community a while back. Today, I’m happy to share some thoughts on how to build with GitLab and deploy applications into dozens or even hundreds of cloud environments. \n\n## The rise of multicloud\n\nHow do you run applications in different data centers? Do you need to rewrite them from scratch? 
AWS may still be the dominant cloud provider, but cloud competitors are eating into their lead. It’s not just the big public cloud companies either. [Private cloud data centers](https://www.forbes.com/sites/jasonbloomberg/2019/02/02/have-private-clouds-finally-found-their-place-in-the-enterprise/#2f859685604f) are growing just as rapidly.\n\nMany companies that need to meet tough security and compliance requirements will require applications to run in their bare metal data centers. Running an application on an on-premises or even air-gapped data center adds additional complexity due to the hundreds or even thousands of dependencies in modern applications.\n\nGravitational has built Gravity, an open source [Kubernetes packaging solution ](https://gravitational.com/gravity/)that allows developers to build “cluster images” (similar to VM images) that can contain an entire Kubernetes cluster pre-loaded with multiple applications. You would use GitLab to go from idea to production, and Gravity to expand your production to anywhere in the world. \n\nStatements like, “I have snapshotted our entire production environment and emailed it to you, so you can run it in your private data center,” will not seem completely crazy.\n\nGravity uses standard, upstream CNCF-supported tooling for creating \"images\" of Kubernetes clusters containing the applications and their dependencies. The resulting files are called cluster images which are just .tar files.\n\nA cluster image can be used to recreate full replicas of the original environments for any deployment environment where compliance and consistency matter, i.e. in locked-down AWS/GCP/Azure environments or even in air-gapped server rooms. 
Each image includes all dependencies to spin up a full cluster, as well as the Gravity daemon that handles the most common operational tasks associated with Kubernetes applications, and it monitors and alerts human operators of problems.\n\n## Deploy with GitLab, scale with Gravity\n\n![Gravity dashboard](https://about.gitlab.com/images/blogimages/gravity-dashboard.png)\n\nDevelopers can leverage a GitLab repository as a single source of truth for rolling out a Kubernetes app and leverage [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) for continuous delivery.\n\nAny project of meaningful scale begins by defining an [epic](https://docs.gitlab.com/ee/user/group/epics/) with goals, milestones, and tasks. An [issue](https://docs.gitlab.com/ee/user/project/issues/#issues) is the main object for collaborating on ideas and planning work. GitLab’s [package and container registry](https://about.gitlab.com/stages-devops-lifecycle/package/) helps you manage and package dependencies. \n\n[The GitLab Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/) allows customers to create Kubernetes clusters, utilize review apps, run pipelines, use web terminals, deploy apps, view pod logs, detect and monitor Kubernetes, and much more. For deploying a Kubernetes cluster in a single destination, GitLab provides everything you need from start to finish. \n\nHowever, if your customers need to run your application in their private data centers, they can use Gravity, which essentially copy/pastes the entire Kubernetes cluster environment you’ve built in GitLab. \n\n[Download](https://gravitational.com/gravity/download/) and set up the Gravity open source edition following our [quickstart guide](https://gravitational.com/gravity/docs/quickstart/). From Gravity, you can build a cluster image of your Kubernetes application. 
Gravity’s [documentation](https://gravitational.com/gravity/docs/overview/) will walk you through the steps required to build an image manifest that describes the image build, the installation process, and the system requirements for the cluster. \n\nYou can build empty Kubernetes cluster images to quickly create a large number of identical, production-ready Kubernetes clusters within an organization, or you can build a cluster image that also includes Kubernetes applications to distribute your application to third parties. \n\n## Next steps\n\nIf you want to learn more about working with Kubernetes, start with [Kubernetes 101](https://www.youtube.com/watch?v=rq4GZ_GybN8). You’ll learn how GitLab and Kubernetes interact at various touchpoints. And, if you’re looking for a way to port your applications to new environments, check out [Gravity](https://gravitational.com/gravity). \n\n## About the guest author\n\nEv is a co-founder and the CEO of Gravitational. Before Gravitational, he launched the on-demand OpenCompute servers at Rackspace. Prior to Rackspace, he co-founded Mailgun, the first email service built for developers. Ev has been a fighter against unnecessary complexity in software for 20 years. He abhors cars but loves trains and open source software that doesn't require an army of consultants to operate.\n\n## About Gravitational\n\n[Gravitational](https://gravitational.com) helps companies deliver cloud applications across cloud providers, on-premises environments, and even air-gapped server rooms. Products include Teleport for multi-cloud privileged access management that doesn't get in the way of developer productivity, and Gravity, a Kubernetes packaging solution that takes the drama out of on-prem deployments. Gravitational was founded in 2015 and recently [announced their Series A](https://gravitational.com/blog/gravitational-series-a-funding/). 
\n\nCover image by [Sharon McCutcheon](https://unsplash.com/@sharonmccutcheon) on [Unsplash](https://unsplash.com/photos/TMwHpCrU8D4)\n",[830,721,232,1228,9,829],{"slug":2374,"featured":6,"template":700},"from-idea-to-production-on-thousands-of-clouds","content:en-us:blog:from-idea-to-production-on-thousands-of-clouds.yml","From Idea To Production On Thousands Of Clouds","en-us/blog/from-idea-to-production-on-thousands-of-clouds.yml","en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"_path":2380,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2381,"content":2387,"config":2391,"_id":2393,"_type":14,"title":2394,"_source":16,"_file":2395,"_stem":2396,"_extension":19},"/en-us/blog/fuzzing-with-gitlab",{"title":2382,"description":2383,"ogTitle":2382,"ogDescription":2383,"noIndex":6,"ogImage":2384,"ogUrl":2385,"ogSiteName":685,"ogType":686,"canonicalUrls":2385,"schema":2386},"Find Bugs with Coverage-Guided Fuzz Testing","Use fuzz testing to find bugs and security issues other QA processes might miss.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681620/Blog/Hero%20Images/taya-dianna-zgSaLgXIINI-unsplash.jpg","https://about.gitlab.com/blog/fuzzing-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Find Bugs with Coverage-Guided Fuzz Testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-10-01\",\n      }",{"title":2382,"description":2383,"authors":2388,"heroImage":2384,"date":1776,"body":2389,"category":978,"tags":2390},[1775],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitLab comes with built-in coverage-guided fuzz testing. Coverage-guided fuzz testing helps you discover\nbugs and potential security issues that other QA processes might miss. 
It sends\nrandom inputs to an instrumented version of your application in an effort to cause unexpected behavior,\nsuch as a crash. Such behavior indicates a bug that you should address.\n\nWatch this short video (3 minutes) to learn how to configure [Fuzz-Testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) on GitLab.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/4ROYvNfRZVU\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Taya Dianna](https://unsplash.com/@tayadianna) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9,697],{"slug":2392,"featured":6,"template":700},"fuzzing-with-gitlab","content:en-us:blog:fuzzing-with-gitlab.yml","Fuzzing With Gitlab","en-us/blog/fuzzing-with-gitlab.yml","en-us/blog/fuzzing-with-gitlab",{"_path":2398,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2399,"content":2405,"config":2411,"_id":2413,"_type":14,"title":2414,"_source":16,"_file":2415,"_stem":2416,"_extension":19},"/en-us/blog/get-started-ci-pipeline-templates",{"title":2400,"description":2401,"ogTitle":2400,"ogDescription":2401,"noIndex":6,"ogImage":2402,"ogUrl":2403,"ogSiteName":685,"ogType":686,"canonicalUrls":2403,"schema":2404},"How to use GitLab’s CI/CD pipeline templates","Learn how pipeline templates and Auto DevOps can get you up and running on GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667139/Blog/Hero%20Images/CI-pipeline-templates.jpg","https://about.gitlab.com/blog/get-started-ci-pipeline-templates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab’s CI/CD pipeline templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-09-23\",\n      
}",{"title":2400,"description":2401,"authors":2406,"heroImage":2402,"date":2407,"body":2408,"category":1040,"tags":2409},[715],"2020-09-23","\nWriting deployment pipelines from scratch is a real pain in the branch. We want to make the [continuous integration](/topics/ci-cd/) experience more automatic so teams can get up and running quickly with [GitLab CI/CD](/topics/ci-cd/).\n\nAn easy way to get started is with GitLab’s CI/CD pipeline templates. Pipeline templates come in **more than 30** popular programming languages and frameworks. We’ll show you how to use these pipeline templates for your specific needs.\n\nFor an even more automatic continuous integration experience, we also offer [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) that does much of the legwork for you. Auto DevOps runs on pipelines automatically when a [Dockerfile or matching buildpack](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build) exists, and identifies dependencies automatically.\n\n## What are CI pipeline templates?\n\n[Pipelines](https://docs.gitlab.com/ee/ci/pipelines/) are an integral component of both continuous integration (CI) and [continuous delivery (CD)](/topics/continuous-delivery/), and continuous deployment (the other \"CD\"). A deployment pipeline consists of two things:\n\n*   Jobs, which define _what_ to do. For example, jobs that compile or test code.\n*   Stages, which define _when_ to run the jobs. For example, stages that run tests after stages that compile the code.\n\nPipelines consist of one or more stages that run in order and can each contain one or more jobs that run in parallel. These jobs (or scripts) get run by agents, such as a [GitLab Runner](https://docs.gitlab.com/runner/).\n\nAt GitLab, pipelines are defined in a `gitlab-ci.yml` file. [CI/CD templates](https://docs.gitlab.com/ee/ci/examples/#cicd-templates) incorporate your favorite programming language or framework into this YAML file. 
Instead of building pipelines from scratch, CI/CD templates simplify the process by having parameters already built-in.\n\nYou can choose one of these templates when you create a `gitlab-ci.yml` file in the UI.\n\n![GitLab CI pipeline templates](https://docs.gitlab.com/ee/ci/img/add_file_template_11_10.png)\n\nBecause our CI/CD templates come in more than 30 popular languages, the chances are good that we have the template you need to get started in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\n## What is Auto DevOps?\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is a GitLab-exclusive feature that provides predefined CI/CD configurations that automatically detect, build, test, deploy, and monitor your applications. Rather than just accessing a template, Auto DevOps is a setting within your GitLab instance that is [enabled by default](https://docs.gitlab.com/ee/topics/autodevops/#enabled-by-default).\n\nOur [product vision for Auto DevOps](/direction/delivery/auto_devops/) is that everything is fully connected as part of one great GitLab experience. The term Auto DevOps actually comes from the different parts that are automated by Auto DevOps:\n\n*   \"Auto CI\" – Compile and test software based on best practices for the most common languages and frameworks.\n*   \"Auto review\" – Automatic analysis tools like Code Climate.\n*   \"Auto deploy\" – Based on [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) and incremental rollouts on Kubernetes clusters.\n*   \"Auto metrics\" – Collect statistical data from all the previous steps in order to guarantee performances and optimization of the whole process.\n\nAuto DevOps provides great defaults for all the stages and makes use of CI templates. 
You can [customize Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html) to meet your needs, and [manage Auto DevOps with GitLab APIs](https://docs.gitlab.com/ee/topics/autodevops/customize.html#extend-auto-devops-with-the-api).\n\nLearn more about Auto DevOps, check out this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/0Tc0YYBxqi4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Other CI/CD resources\n\nGitLab also provides [CI/CD examples](https://docs.gitlab.com/ee/ci/examples/) so you can learn how to implement GitLab CI/CD for your specific use case. In addition to template files, you can find repositories with sample projects, and step-by-step tutorials for a variety of scenarios, including:\n\n*   [DevOps and Game Dev with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   [Test and deploy a Ruby application with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   [How to deploy Maven projects to Artifactory with GitLab CI/CD](https://docs.gitlab.com/ee/ci/examples/)\n*   ... And many others\n\nWith CI/CD templates and our Auto DevOps product feature, teams can start reaping the benefits of continuous integration without all of the manual configurations. For teams managing sometimes _hundreds_ of projects, it’s not realistic or doable to start from scratch. And with GitLab, you don’t have to.\n\nCurious about our best-in-class continuous integration? [Try GitLab free](/free-trial/).\n\n## Related reads\n\n*   [\"A beginner's guide to continuous integration\"](/blog/a-beginners-guide-to-continuous-integration/)\n\n*   [\"Want a more effective CI/CD pipeline? 
Try our pro tips\"](/blog/effective-ci-cd-pipelines/)\n\n*   [\"3 CI/CD challenges to consider\"](/blog/modernize-your-ci-cd/)\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/laboratory?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,2410,721],"UI",{"slug":2412,"featured":6,"template":700},"get-started-ci-pipeline-templates","content:en-us:blog:get-started-ci-pipeline-templates.yml","Get Started Ci Pipeline Templates","en-us/blog/get-started-ci-pipeline-templates.yml","en-us/blog/get-started-ci-pipeline-templates",{"_path":2418,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2419,"content":2423,"config":2430,"_id":2432,"_type":14,"title":2433,"_source":16,"_file":2434,"_stem":2435,"_extension":19},"/en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui",{"config":2420,"title":2421,"description":2422},{"noIndex":6},"Get started with GitLab Duo Agentic Chat in the web UI","Learn about our new GitLab Duo AI feature that automates tasks by breaking down complex problems and executing operations across multiple sources.",{"title":2421,"description":2422,"authors":2424,"heroImage":846,"date":2427,"body":2428,"category":849,"tags":2429},[2425,2426],"Fatima Sarah Khalid","Daniel Helfand","2025-08-11","In May 2025, GitLab launched an experimental feature called [GitLab Duo\nAgentic\nChat](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/).\nThe goal of Agentic Chat was to build on the success of [GitLab Duo\nChat](https://docs.gitlab.com/user/gitlab_duo_chat/), which is an AI chat\nexperience built into supported IDEs and in the GitLab UI. 
While Chat\nprovides answers and suggestions for developers using the GitLab platform,\nAgentic Chat can more directly interact with the GitLab API on behalf of\nusers, taking actions on their behalf as a result of the conversation.\n\n\nIn addition to being available in a variety of IDEs, Agentic Chat is available directly within the GitLab UI for GitLab users with the Duo Pro or Enterprise add-on. Adding Agentic Chat to the GitLab UI helps make this experience more accessible to all GitLab users and easy to integrate into your workflows. To open Agentic Chat:\n\n\n1. Navigate to any Group or Project in your GitLab instance.  \n\n2. Look for the GitLab Duo Chat button (typically in the top right corner).  \n\n3. Click to open the chat panel.  \n\n4. Toggle to **Agentic mode (Beta)** in the chat window.\n\n\n**Pro tip:** Keep the chat panel open as you work — it maintains context and can help you across different pages and projects.\n\n\nTo get familiar with Agentic Chat, ask about the tools it can work with. This is like using the help command for a command-line tool.  \n\n\n```offset\nWhat tools do you have access to? \n```\n\n![GitLab Duo Agentic Chat screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1754584200/emtgilbzbu8ftkynjozg.png)\n\nThe output above shows us that Agentic Chat has access to a variety of GitLab APIs and data that will allow it to perform complex tasks across the software development lifecycle.   \n\n## Issue management made easy\n\n\nGitLab Duo Agentic Chat can help you keep track of issues, find specific ones, understand the status, and take actions based on conversations in these issues. Instead of navigating through pages and pages of issues, you ask Agentic Chat about the issues in a project. 
It will respond with high-level information about the issues, including the priority, labels, and the status of the issue.\n\n\nFor a specific issue, Agentic Chat will fetch the issue details, provide a concise summary, highlight recent activity, and share the goal of the issue. This is particularly helpful when you need context or updates before a meeting or are researching the issue before picking it up. \n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1107479358?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agentic Chat UI Issue Management\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cp>\u003C/p>\n\n\nYou can also try more complex queries if you're looking to better understand a project overall. And once you've discovered these issues, you can make changes to them like adding labels, updating milestones, and re-organizing them. \n\n\nFor example, maybe you're looking for all the issues that are database- or performance-related in order to prioritize them in the next sprint. 
You could task Agentic Chat with the following prompt.\n\n\n```offset\nAnalyze all issues labeled 'performance' and 'database' - group them by component and show me which ones have had the most discussion activity in the last 30 days.\n```\n\n\nAgentic Chat will respond with issues grouped by the backend and frontend component of a project, identify the issues with significant discussion activity, and provide insights on these kinds of issues (e.g., when were most of these issues created or which component issues have more active discussion).\n\n\n```offset\nCreate an issue template for bug reports that includes:\n\n- Steps to reproduce\n\n- Expected behavior vs actual behavior\n\n- Environment details (browser, OS, GitLab version)\n\n- Severity assessment\n\n- Screenshots/error logs section\n\nName it \"bug_report.md\" and format it as a proper GitLab issue template\n```\n\n\n## CI/CD support  \n\n\nThis is where GitLab Duo Agentic Chat truly becomes your debugging superhero. We've all been there: a pipeline fails and you have to click through job logs trying to understand what went wrong. Agentic Chat can do more than just explain the failure to you and suggest recommendations. After reviewing the failed pipeline logs, Agentic Chat can suggest a fix and also add the fix to a merge request you are working on.\n\n\nLet's say you have a merge request adding a new feature, but the pipeline is failing. Instead of clicking through each failed job and trying to piece together what's wrong, you can ask Agentic Chat to investigate.\n\n\nAgentic Chat will analyze the pipeline, check the job logs, and explain that the tests are failing because of missing test data or configuration issues. But here's where it gets even more powerful — you don't have to stop at understanding the problem. 
Agentic Chat can also act on the advice it presents and add commits to fix the pipeline in the merge request.\n\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1107495269?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" referrerpolicy=\"strict-origin-when-cross-origin\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Agentic Chat CI/CD Fix\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\n\n## Building complex prompts\n\n\nGitLab Duo Agentic Chat can also help you craft your prompts. Let's say that you're running a bug bash with your team and want to triage all possible issues that might be bug reports.\n\n\nIf you use a simple prompt like below, Agentic Chat will come up with ways to find the related issues, such as searching for terms or pattern matching:\n\n\n```offset\nI need help writing an effective prompt to find all possible bug report issues in my GitLab project, including those that might not be properly labeled as \"bug\".\n```\n\nOften you can use the recommendations in Agentic Chat to build a more in-depth prompt based on what you're looking for: \n\n```offset\nI need help writing an effective prompt to find all possible bug report issues in my GitLab project, including those that might not be properly labeled as \"bug\". Please help me create a prompt that will:\n\n\n1. Search for common bug-related terminology beyond just the word \"bug\"\n\n2. Identify patterns that indicate bug reports (like \"steps to reproduce\", \"expected vs actual behavior\")\n\n3. Find technical issues that might be bugs (errors, crashes, performance problems)\n\n4. 
Catch user-reported problems that could be bugs but use different language\n\n\nThe prompt should ensure we don't miss any potential bugs regardless of how they're described or labeled. What would be the most effective approach and search strategy for this?\n```\n\n\nOnce you have the prompt and you're able to search for the issues you're looking for, that's where Agentic Chat really shines. Agentic Chat can triage and update those issues for you to prepare them for the bug bash:\n\n\n```offset\nFind and triage all bug-related issues for our bug bash event. Execute these steps:\n\n\n1. Search for potential bugs using individual searches:\n   - Core terms: \"bug\", \"fix\", \"error\", \"broken\", \"issue\", \"problem\", \"not working\"\n   - Bug patterns: \"steps to reproduce\", \"expected behavior\", \"regression\"\n   - Technical issues: \"exception\", \"crash\", \"console error\", \"500 error\", \"404 error\"\n   - Performance: \"slow\", \"freezes\", \"unresponsive\"\n\n2. For each issue found:\n   - Add the \"Event - Bug Bash\" label\n   - Assign appropriate bug severity label (critical/high/medium/low)\n   - Add to the current bug bash milestone\n   - If missing \"bug\" label, add it\n\n3. Create a triage list organized by:\n   - Critical bugs (data loss, crashes, security)\n   - High priority (blocking features, frequent errors)\n   - Medium priority (workarounds available)\n   - Low priority (minor UI issues)\n\nSearch both open and closed issues. Focus on actionable bugs that can be fixed during the bug bash, excluding enhancement requests. Provide a summary table with issue numbers, titles, and assigned severity for the bug bash team.\n```\n\n\nYou can ask Agentic Chat to create a bug report template, which increases efficiency and eliminates some manual effort. Also, future bug reports will have the structure and labels you need for more efficient triaging.   
\n\n## Tips for effective prompting \n\n\nWhen you're working with GitLab Duo Agentic Chat, it's important to phrase your requests with action-oriented verbs like \"create,\" \"update,\" \"fix,\" or \"assign.\" This will trigger the agentic tools to take action rather than summarize or share information with you. One approach before taking agentic actions can be to request summaries and analyses — the way we did with the issues about bugs. Then, see what gets returned before taking actions like applying a label or adding to a milestone. \n\n\nIt's also important to give clear criteria when asking for bulk operations. Specify exact conditions like \"all issues with the 'bug' label created in the last week\" or \"merge requests waiting for review for more than 3 days.\" The more specific you are, the more accurate and helpful the results will be.\n\n\nSince Agentic Chat has the ability to maintain context, you can chain requests and build on previous requests. After getting an initial set of issues, you might ask \"From those issues, which ones are unassigned?\" and then follow up with \"Assign the high-priority ones to the backend team.\" This allows you to refine and act on information iteratively. \n\n\nWe recommend starting with an open-ended request and allowing GitLab Duo to help you look for patterns or similar problems across your project. That will help you catch any problem that you may have missed or understand the scope of the challenge before taking action.   \n\n\n## Get hands-on with GitLab Duo Agentic Chat\n\n\nWe hope all the ideas above give you some thoughts on getting started with Agentic Chat, but we are even more excited to see all our users' ideas come to life with it. To try the Agentic Chat UI experience in your next project, sign up for a [free trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/). 
You can learn more about GitLab Duo Agentic Chat on our [documentation page](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/), which also details how to enable Agentic Chat in the GitLab UI.\n",[851,9],{"featured":91,"template":700,"slug":2431},"get-started-with-gitlab-duo-agentic-chat-in-the-web-ui","content:en-us:blog:get-started-with-gitlab-duo-agentic-chat-in-the-web-ui.yml","Get Started With Gitlab Duo Agentic Chat In The Web Ui","en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui.yml","en-us/blog/get-started-with-gitlab-duo-agentic-chat-in-the-web-ui",{"_path":2437,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2438,"content":2443,"config":2448,"_id":2450,"_type":14,"title":2451,"_source":16,"_file":2452,"_stem":2453,"_extension":19},"/en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"title":2439,"description":2440,"ogTitle":2439,"ogDescription":2440,"noIndex":6,"ogImage":1076,"ogUrl":2441,"ogSiteName":685,"ogType":686,"canonicalUrls":2441,"schema":2442},"Getting started with GitLab: Understanding CI/CD","Learn the basics of continuous integration/continuous delivery in this beginner's guide, including what CI/CD components are and how to create them.","https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Understanding CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-04-25\",\n      }",{"title":2439,"description":2440,"authors":2444,"heroImage":1076,"date":2445,"body":2446,"category":693,"tags":2447},[1081],"2025-04-25","*Welcome to our \"Getting started with GitLab\" series, where we help\nnewcomers get familiar with the GitLab DevSecOps platform.*\n\n\nImagine a workflow where every code change is automatically built, tested,\nand deployed to your users. 
That's the power of [Continuous\nIntegration/Continuous Delivery\n(CI/CD)](https://about.gitlab.com/topics/ci-cd/)! CI/CD helps you catch bugs\nearly, ensures code quality, and delivers software faster and more\nfrequently.\n\n\n### What is CI/CD?\n\n\n* **Continuous Integration** is a development practice where developers\nintegrate code changes into a shared repository frequently, preferably\nseveral times a day. Each integration is then verified by an automated build\nand test process, allowing teams to detect problems early.  \n\n* **Continuous Delivery** extends CI by automating the release pipeline,\nensuring that your code is *always* in a deployable state. You can deploy\nyour application to various environments (e.g., staging, production) with a\nsingle click or automatically.  \n\n* **Continuous Deployment** takes it a step further by automatically\ndeploying *every successful build* to production. This requires a high\ndegree of confidence in your automated tests and deployment process.\n\n\n### Why GitLab CI/CD?\n\n\nGitLab CI/CD is a powerful, integrated system that comes built-in with\nGitLab. It offers a seamless experience for automating your entire software\ndevelopment lifecycle. With GitLab CI/CD, you can:\n\n\n* **Automate everything:** Build, test, and deploy your applications with\nease.  \n\n* **Catch bugs early:** Detect and fix errors before they reach\nproduction.  \n\n* **Get faster feedback:** Receive immediate feedback on your code\nchanges.  \n\n* **Improve collaboration:** Work together more effectively with automated\nworkflows.  \n\n* **Accelerate delivery:** Release software faster and more frequently.  \n\n* **Reduce risk:** Minimize deployment errors and rollbacks.\n\n\n### The elements of GitLab CI/CD\n\n\n* `.gitlab-ci.yml`**:** This [YAML\nfile](https://docs.gitlab.com/ee/ci/yaml/), located in your project's root\ndirectory, defines your CI/CD pipeline, including stages, jobs, and\nrunners.  
\n\n* [**GitLab Runner**](https://docs.gitlab.com/runner/)**:** This agent\nexecutes your CI/CD jobs on your infrastructure (e.g. physical machines,\nvirtual machines, Docker containers, or Kubernetes clusters).  \n\n* [**Stages**](https://docs.gitlab.com/ee/ci/yaml/#stages)**:** Stages\ndefine the order of execution for your jobs (e.g. build, test, and\ndeploy).  \n\n* [**Jobs**](https://docs.gitlab.com/ee/ci/yaml/#job-keywords)**:** Jobs are\nindividual units of work within a stage (e.g. compile code, run tests, and\ndeploy to staging).\n\n\n### Setting up GitLab CI\n\n\nGetting started with GitLab CI is simple. Here's a basic example of a\n`.gitlab-ci.yml` file:\n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the application...\"\n\ntest_job:\n  stage: test\n  script:\n    - echo \"Running tests...\"\n\ndeploy_job:\n  stage: deploy\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n\n```\n\n\nThis configuration defines three stages: \"build,\" \"test,\" and \"deploy.\" Each\nstage contains a job that executes a simple script.\n\n\n### CI/CD configuration examples\n\n\nLet's explore some more realistic examples.\n\n\n**Building and deploying a Node.js application**\n\n\nThe pipeline definition below outlines using npm to build and test a Node.js\napplication and [dpl](https://docs.gitlab.com/ci/examples/deployment/) to\ndeploy the application to Heroku. The deploy stage of the pipeline makes use\nof [GitLab CI/CD variables](https://docs.gitlab.com/ci/variables/), which\nallow developers to store sensitive information (e.g. credentials) and\nsecurely use them in CI/CD processes. 
In this example, an API key to deploy\nto Heroku is stored under the variable key name `$HEROKU_API_KEY` used by\nthe dpl tool.\n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  image: node:latest\n  script:\n    - npm install\n    - npm run build\n\ntest:\n  stage: test\n  image: node:latest\n  script:\n    - npm run test\n\ndeploy:\n  stage: deploy\n  image: ruby:latest\n  script:\n    - gem install dpl\n    - dpl --provider=heroku --app=$HEROKU_APP_NAME --api-key=$HEROKU_API_KEY\n\n```\n\n\n**Deploying to different environments (staging and production)**\n\n\nGitLab also offers the idea of\n[Environments](https://docs.gitlab.com/ci/environments/) with CI/CD. This\nfeature allows users to track deployments from CI/CD to infrastructure\ntargets. In the example below, the pipeline adds stages with an environment\nproperty for a staging and production environment. While the deploy_staging\nstage will always run its script, the deploy_production stage requires\nmanual approval to prevent accidental deployment to production.  \n\n\n```yaml\n\n\nstages:\n  - build\n  - test\n  - deploy_staging\n  - deploy_production\n\nbuild:\n  # ...\n\ntest:\n  # ...\n\ndeploy_staging:\n  stage: deploy_staging\n  script:\n    - echo \"Deploying to staging...\"\n  environment:\n    name: staging\n\ndeploy_production:\n  stage: deploy_production\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n  when: manual  # Requires manual approval\n\n```\n\n\n### GitLab Auto DevOps\n\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)\nsimplifies CI/CD by providing a pre-defined configuration that automatically\nbuilds, tests, and deploys your applications. It leverages best practices\nand industry standards to streamline your workflow.\n\n\nTo enable Auto DevOps:\n\n\n1. Go to your project's **Settings > CI/CD > General pipelines**.  \n\n2. 
Enable the **Auto DevOps** option.\n\n\nAuto DevOps automatically detects your project's language and framework and\nconfigures the necessary build, test, and deployment stages. You don’t even\nneed to create a `.gitlab-ci.yml` file.\n\n\n### CI/CD Catalog\n\n\nThe [CI/CD\nCatalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\nis a list of projects with published [CI/CD\ncomponents](https://docs.gitlab.com/ee/ci/components/) you can use to extend\nyour CI/CD workflow. Anyone can create a component project and add it to the\nCI/CD Catalog or contribute to an existing project to improve the available\ncomponents. You can find published components in the [CI/CD\nCatalog](https://gitlab.com/explore/catalog) on GitLab.com.\n\n\n> [Tutorial: How to set up your first GitLab CI/CD\ncomponent](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n\n\n### CI templates\n\n\nYou can also create your own [CI\ntemplates](https://docs.gitlab.com/ee/ci/examples/) to standardize and reuse\nCI/CD configurations across multiple projects. This promotes consistency and\nreduces duplication.\n\n\nTo create a CI template:\n\n\n1. Create a `.gitlab-ci.yml` file in a dedicated project or repository.  \n\n2. Define your CI/CD configuration in the template.  \n\n3. In your project's `.gitlab-ci.yml` file, use the `include` keyword to\ninclude the template.\n\n\n## Take your development to the next level\n\n\nGitLab CI/CD is a powerful tool that can transform your development\nworkflow. By understanding the concepts of CI/CD, configuring your\npipelines, and leveraging features like Auto DevOps, the CI/CD Catalog, and\nCI templates, you can automate your entire software development lifecycle\nand deliver high-quality software faster and more efficiently.\n\n\n> Want to take your learning to the next level? Sign up for [GitLab\nUniversity courses](https://university.gitlab.com/). 
Or you can get going\nright away with a [free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/).\n\n\n## \"Getting Started with GitLab\" series\n\n\nCheck out more articles in our \"Getting Started with GitLab\" series:\n\n\n- [How to manage\nusers](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n\n- [How to import your projects to\nGitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n\n- [Mastering project\nmanagement](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n\n- [Automating Agile workflows with the gitlab-triage\ngem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n\n- [Working with CI/CD\nvariables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[9,785,786,495,693,917],{"slug":2449,"featured":91,"template":700},"getting-started-with-gitlab-understanding-ci-cd","content:en-us:blog:getting-started-with-gitlab-understanding-ci-cd.yml","Getting Started With Gitlab Understanding Ci Cd","en-us/blog/getting-started-with-gitlab-understanding-ci-cd.yml","en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"_path":2455,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2456,"content":2461,"config":2467,"_id":2469,"_type":14,"title":2470,"_source":16,"_file":2471,"_stem":2472,"_extension":19},"/en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"title":2457,"description":2458,"ogTitle":2457,"ogDescription":2458,"noIndex":6,"ogImage":1076,"ogUrl":2459,"ogSiteName":685,"ogType":686,"canonicalUrls":2459,"schema":2460},"Getting started with GitLab: Working with CI/CD variables","Learn what CI/CD variables are, why they are important in DevSecOps, and best practices for utilizing them.","https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Working with CI/CD variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2025-05-27\",\n      }",{"title":2457,"description":2458,"authors":2462,"heroImage":1076,"date":2464,"body":2465,"category":693,"tags":2466},[2463],"GitLab Team","2025-05-27","*Welcome to our \"Getting started with GitLab\" series, where we help\nnewcomers get familiar with the GitLab DevSecOps platform.*\n\n\nIn an earlier article, we explored [GitLab\nCI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/).\nNow, let's dive deeper into the world of **CI/CD variables** and unlock\ntheir full potential.\n\n\n### What are CI/CD variables?\n\n\nCI/CD variables are dynamic key-value pairs that you can define at different\nlevels within your GitLab environment (e.g., project, group, or instance).\nThese variables act as placeholders for values that you can use in your\n`.gitlab-ci.yml` file to customize your pipelines, securely store sensitive\ninformation, and make your CI/CD configuration more maintainable.\n\n\n### Why are CI/CD variables important?\n\n\nCI/CD variables offer numerous benefits:\n\n\n* **Flexibility** - Easily adapt your pipelines to different environments,\nconfigurations, or deployment targets without modifying your core CI/CD\nscript.  \n\n* **Security** - Securely store sensitive information like API keys,\npasswords, and tokens, preventing them from being exposed directly in your\ncode.  \n\n* **Maintainability** - Keep your CI/CD configuration clean and organized by\ncentralizing values in variables, making updates and modifications easier.  
\n\n* **Reusability** - Define variables once and reuse them across multiple\nprojects, promoting consistency and reducing duplication.\n\n\n### Scopes of CI/CD variables: Project, group, and instance\n\n\nGitLab allows you to define CI/CD variables with different scopes,\ncontrolling their visibility and accessibility:\n\n\n* **Project-level variables** - These variables are specific to a single\nproject and are ideal for storing project-specific settings, such as:\n  * Deployment URLs: Define different URLs for staging and production environments.  \n  * Database credentials: Store database connection details for testing or deployment.  \n  * Feature flags: Enable or disable features during different stages of your pipeline.  \n  * Example: You have a project called \"MyWebApp\" and want to store the production deployment URL. You create a project-level variable named `DPROD_DEPLOY_URL` with the value `https://mywebapp.com`.  \n* **Group-level variables** - These variables are shared across all projects\nwithin a GitLab group. They are useful for settings that are common to\nmultiple projects, such as:\n\n  * API keys for shared services: Store API keys for services like AWS, Google Cloud, or Docker Hub that are used by multiple projects within the group.  \n  * Global configuration settings: Define common configuration parameters that apply to all projects in the group.  \n  * Example: You have a group called \"Web Apps\" and want to store an API key for Docker Hub. You create a group-level variable named `DOCKER_HUB_API_KEY` with the corresponding API key value.  \n* **Instance-level variables** - These variables are available to all\nprojects on a GitLab instance. They are typically used for global settings\nthat apply across an entire organization such as:\n\n  * Default runner registration token: Provide a default token for registering new [runners](https://docs.gitlab.com/runner/).  
\n  * License information: Store license keys for GitLab features or third-party tools.  \n  * Global environment settings: Define environment variables that should be available to all projects.  \n  * Example: You want to set a default Docker image for all projects on your GitLab instance. You create an instance-level variable named `DEFAULT_DOCKER_IMAGE` with the value `ubuntu:latest`.\n\n### Defining CI/CD variables\n\n\nTo define a CI/CD variable:\n\n\n1. Click on the **Settings > CI/CD** buttons for  your project, group, or\ninstance.  \n\n2. Go to the **Variables** section.  \n\n3. Click **Add variable**.  \n\n4. Enter the **key** (e.g., `API_KEY`) and **value**.  \n\n5. Optionally, check the **Protect variable** box for sensitive information.\nThis ensures that the variable is only available to pipelines running on\nprotected branches or tags.  \n\n6. Optionally, check the **Mask variable** box to hide the variable's value\nfrom job logs, preventing accidental exposure.  \n\n7. Click **Save variable**.\n\n\n### Using CI/CD variables\n\n\nTo use a CI/CD variable in your `.gitlab-ci.yml` file, simply prefix the\nvariable name with `$`:\n\n\n```yaml\n\ndeploy_job:\n  script:\n    - echo \"Deploying to production...\"\n    - curl -H \"Authorization: Bearer $API_KEY\" https://api.example.com/deploy\n```\n\n\n### Predefined CI/CD variables\n\n\nGitLab provides a set of [predefined CI/CD\nvariables](https://docs.gitlab.com/ci/variables/predefined_variables/) that\nyou can use in your pipelines. These variables provide information about the\ncurrent pipeline, job, project, and more.\n\n\nSome commonly used predefined variables include:\n\n\n* `$CI_COMMIT_SHA`: The commit SHA of the current pipeline.  \n\n* `$CI_PROJECT_DIR`: The directory where the project is cloned.  \n\n* `$CI_PIPELINE_ID`: The ID of the current pipeline.  
\n\n* `$CI_ENVIRONMENT_NAME`: The name of the environment being deployed to (if\napplicable).\n\n\n### Best practices\n\n\n* Securely manage sensitive variables: Use protected and masked variables\nfor API keys, passwords, and other sensitive information.  \n\n* Avoid hardcoding values: Use variables to store configuration values,\nmaking your pipelines more flexible and maintainable.  \n\n* Organize your variables: Use descriptive names and group related variables\ntogether for better organization.  \n\n* Use the appropriate scope: Choose the correct scope (project, group, or\ninstance) for your variables based on their intended use and visibility.\n\n\n### Unlock the power of variables\n\n\nCI/CD variables are a powerful tool for customizing and securing your GitLab\npipelines. By mastering variables and understanding their different scopes,\nyou can create more flexible, maintainable, and efficient workflows.\n\n\nWe hope you found it helpful and are now well-equipped to leverage the power\nof GitLab for your development projects.\n\n\n> Get started with CI/CD variables today with a [free trial of\nGitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/).\n\n\n## \"Getting Started with GitLab\" series\n\nRead more articles in our \"Getting Started with GitLab\" series:\n\n\n- [How to manage\nusers](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n\n-  [How to import your projects to\nGitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n\n- [Mastering project\nmanagement](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n\n- [Automating Agile workflows with the gitlab-triage\ngem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n\n- 
[Understanding\nCI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n",[693,917,785,786,9,695],{"slug":2468,"featured":91,"template":700},"getting-started-with-gitlab-working-with-ci-cd-variables","content:en-us:blog:getting-started-with-gitlab-working-with-ci-cd-variables.yml","Getting Started With Gitlab Working With Ci Cd Variables","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables.yml","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"_path":2474,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2475,"content":2481,"config":2486,"_id":2488,"_type":14,"title":2489,"_source":16,"_file":2490,"_stem":2491,"_extension":19},"/en-us/blog/github-to-gitlab-migration-made-easy",{"title":2476,"description":2477,"ogTitle":2476,"ogDescription":2477,"noIndex":6,"ogImage":2478,"ogUrl":2479,"ogSiteName":685,"ogType":686,"canonicalUrls":2479,"schema":2480},"GitHub to GitLab migration the easy way","Learn how easy it is to migrate from GitHub to GitLab using GitLab's project import functionality.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668776/Blog/Hero%20Images/julia-craice-faCwTallTC0-unsplash.jpg","https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitHub to GitLab migration the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-07-11\",\n      }",{"title":2476,"description":2477,"authors":2482,"heroImage":2478,"date":2483,"body":2484,"category":741,"tags":2485},[1775],"2023-07-11","If you are using different CI/CD tools and are considering migrating over to\nGitLab, you may be wondering about\n\nthe difficulty of the migration process. 
Migration is usually a concern for\n[DevSecOps](https://about.gitlab.com/topics/devsecops/) teams when\nconsidering a new solution. This is due to the fact that migrating may\ninvolve heavy lifting. However, migrating to the GitLab AI-powered DevSecOps\nPlatform can be extremely simple and I will show you how step by step. \n\n\nIn this blog post, we will go over how to migrate from GitHub to GitLab\nusing our [project import](https://docs.gitlab.com/ee/user/project/import/)\nfunctionality. Manually migrating GitHub Actions to GitLab pipelines will be\ncovered as well. I have also created a video going over the migration\nprocess for those who prefer that format:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## What data can be migrated from GitHub to GitLab?\n\nGitLab's built-in importer allows for GitHub projects to be automatically\nmigrated into GitLab. The built-in importer\n\nis accessed directly from GitLab's project creation UI. 
From the UI, you can\nselect what data you wish to migrate to GitLab.\n\n\nThe data that can be migrated includes the following:\n\n* Repository description\n\n* Git repository data\n\n* Branch protection rules\n\n* Collaborators (members)\n\n* Issues\n\n* Pull requests\n\n* Wiki pages\n\n* Milestones\n\n* Labels\n\n* Release notes content\n\n* Release notes attachments\n\n* Comment attachments\n\n* Issue description attachments\n\n* Pull request description attachments\n\n* Pull request review comments\n\n* Regular issue and pull request comments\n\n* Git Large File Storage (LFS) objects\n\n* Pull request reviews\n\n* Pull request assigned reviewers\n\n* Pull request “merged by” information\n\n* Pull request comments replies in discussions\n\n* Pull request review comments suggestions\n\n* Issue events and pull requests events\n\n\nGitHub and GitLab have different naming conventions and concepts, so a\nmapping must be performed during the migration. For example, when\ncollaborators/members are migrated, roles from GitHub are mapped to the\nappropriate GitLab roles as follows:\n\n\n| GitHub role | GitLab role |\n\n| ----------- | ----------- |\n\n| Read        | Guest       |\n\n| Triage      | Reporter    |\n\n| Write       | Developer   |\n\n| Maintain    | Maintainer  |\n\n| Admin       | Owner       |\n\n\n## Prerequisites\n\nNow that you have an understanding of what can be imported, let's review the\nprerequisites for performing the migration.\n\n\nWith the GitLab importer, you can either import your projects from\n**GitHub.com** or **GitHub Enterprise** to either **GitLab.com** or\n**Self-managed GitLab** as long as you meet the following requirements:\n\n* You must be a Maintainer on the GitLab destination group you are importing\nto from GitHub\n\n* Each GitHub author and assignee in the repository must have a\npublic-facing email address on GitHub that matches their GitLab email\naddress\n\n* GitHub accounts must have a public-facing email address that is 
populated\n\n* [GitHub import\nsource](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#configure-allowed-import-sources)\nmust be enabled (Self-managed GitLab only)\n\n\nWhen migrating a user, GitLab uses the public-facing email address in GitHub\nto verify the user with the same email on GitLab. Because email ownership is\nunique, you'll know you have set a valid user with valid permissions.\n\n\n## Performing the import\n\nNow let's go over how to perform the migration. I will be migrating my\nproject, the [Reddit sentiment\nanalyzer](https://github.com/fishtoadsoft/reddit-sentiment-analyzer), from\nGitHub to GitLab. The Reddit sentiment analyzer contains a pull request\n(called a merge request in GitLab), issues, and comments. \n\n\n**Note:** While you may not have permissions to my project, the step-by-step\nprocess applies to any project you own. I am using my project so you can see\nhow I migrate GitHub Actions in the next section. Now, let's get started!\n\n\n1) Create a new project in GitLab using the [Project Creation\nInterface](https://gitlab.com/projects/new).\n\n\n2) Select the **Import Project** box. This allows you to migrate data from\nexternal sources.\n\n\n![Import project\nbox](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_project.png)\n\n\n3) Under **Import project from**, press the **GitHub** button. This will\ntake you to the **Authenticate with GitHub** page.\n\n\n4) Press the **Authenticate with GitHub** button. You can also use a\n[personal access\ntoken](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens)\nfrom GitHub with the **repo scope** if you prefer. 
This will take you to the\nGitHub authorization app.\n\n\n5) From here, you can grant access to [GitHub\norganization(s)](https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/about-organizations)\nwhere the projects you wish to migrate are located.\n\n\n![GitHub authorization\napp](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/github_authorize_app.png)\n\n\n6) Press the **Grant** button for the organization where the project you\nwish to migrate is stored.\n\n\n7) Press the **Authorize gitlabhq** button to grant GitLab access to the\norganization(s) selected. You will then be taken to the import selection\npage.\n\n\n8) From here, you can select the items you wish to import. \n\n\n![Import\nselection](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_selection.png)\n\n\n**Note:** The more items you choose to migrate, the longer the import will\ntake.\n\n\n9) Then you must set the GitLab location you want to migrate the GitHub\nproject to.\n\n\n![Set the GitLab location to migrate\nto](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_to.png)\n\n\n10) Press the **Import** button and the import will begin. You can see the\nprogress in the UI. Once the import is complete the status will be changed\nto \"complete.\"\n\n\n[Import progress\nstatus](/images/blogimages/2023-july-github-to-gitlab-migration/import_progress.png)\n\n\nNow you should have the imported project in your workspace. 
Mine is called\n[https://gitlab.com/awkwardferny/reddit-sentiment-analyzer](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer).\nWhen examining the imported project, you can see the following:\n\n\n**Repository has been migrated**\n\n\n![Repository has been\nmigrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_data.png)\n\n\n**Issue has been migrated**\n\n\n![Issue has been\nmigrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_issue.png)\n\n\n**Merge request has been migrated**\n\n\n![Merge request has been\nmigrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_merge_request.png)\n\n\n## Migrating GitHub Actions over to GitLab CI/CD\n\nNow that you have migrated the project over from GitHub, notice that none of\nthe GitHub Actions are running. Don't worry, they are very easy to migrate\nmanually. So let's start the migration process for Actions.\n\n\n1) Examine the GitHub Actions within the **.github/workflows** folder. In\nthe [project you just\nimported](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer/-/tree/master/.github/workflows),\nyou should see three different Action files:\n\n\n#### lint.yml\n\nThis file contains the Action, which performs linting on the source code\nusing flake8. 
It uses the python:3.10 Docker image and installs the\napplication requirements before performing the lint.\n\n\n```yaml\n\nname: \"Lint\"\n\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install flake8 pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Lint with flake8\n      run: |\n        # stop the build if there are Python syntax errors or undefined names\n        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n```\n\n\n#### smoke.yml\n\nThis file contains the action which performs a smoke test by just running\nthe CLI help menu. 
It uses the python:3.10 Docker image and installs the\napplication requirements before performing the smoke test.\n\n\n```yaml\n\nname: \"Smoke Tests\"\n\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  smoke-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install setuptools\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Install Sentiment Analysis Application\n      run: |\n        python setup.py install\n    - name: Run smoke tests\n      run: |\n        reddit-sentiment --help\n```\n\n\n#### unit.yml\n\nThis file contains the Action, which performs unit tests using pytest. It\nuses the python:3.10 Docker image and installs the application requirements\nrunning the unit tests.\n\n\n```yaml\n\nname: \"Unit Tests\"\n\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  unit-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Test with pytest\n      run: |\n        python -m pip install --upgrade pip\n        if [ -f test-requirements.txt ]; then pip install -r test-requirements.txt; fi\n        pytest tests/\n```\n\n\nNow let's go ahead and migrate these Actions over to GitLab.\n\n\n2) Go to the recently imported project on GitLab and open up the\n[WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n\n3) Create a file at 
the root called\n[**.gitlab-ci.yml**](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).\n\nThis file defines the GitLab pipeline.\n\n\n4) Add the following configuration, which will add the GitHub Actions as\nJobs in the GitLab pipeline. Notice the comments I added describing each\nsection.\n\n\n```yaml\n\n# This creates the stages in which the jobs will run. By default all\n\n# jobs will run in parallel in the stage. Once the jobs are completed\n\n# successfully then you move on to the next stage. The way jobs run\n\n# is completely configurable.\n\nstages:\n  - test\n\n# With the include statement, you can quickly add jobs which have\n\n# been pre-defined in external YAMLs. The SAST job I included below\n\n# is provided and maintained by GitLab and adds Static Application\n\n# Security Testing (SAST) to your pipeline.\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\n# This is the unit test job which does exactly what is defined in\n\n# the GitHub Action in unit.yml. You can see it uses the python:3.10\n\n# Docker image, installs the application dependencies, and then runs\n\n# the unit tests with pytest. It was added with a simple copy and\n\n# paste and minor syntax changes.\n\nunit:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install pytest\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - pytest tests/\n\n# This is the lint job which does exactly what is defined in the\n\n# GitHub Action in lint.yml. You can see it uses the python:3.10\n\n# Docker image, installs the application dependencies, and then\n\n# performs the linting with flake8. 
It was added with a simple copy\n\n# and paste and minor syntax changes.\n\nlint:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install flake8\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n    - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n\n# This is the smoke test job which does exactly what is defined in\n\n# the GitHub Action in smoke.yml. You can see it uses the python:3.10\n\n# Docker image, installs the application dependencies, and then runs\n\n# the smoke tests with the Reddit sentiment analysis CLI. It was\n\n# added with a simple copy and paste and minor syntax changes.\n\nsmoke:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install setuptools\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - python setup.py install\n  script:\n    - reddit-sentiment --help\n```\n\n\nYou can see that scripts being executed in GitLab match those scripts within\nthe GitHub Actions. The only thing that has really changed is the syntax\nsetting up the jobs and stages. To learn more on how to create and configure\npipelines, check out the [GitLab CI/CD\ndocumentation](https://docs.gitlab.com/ee/ci/).\n\n\n5) Let's check in the code. From the WebIDE click on the Source Control Tab\nin the side panel of the WebIDE. It is the [third icon from the\ntop](https://code.visualstudio.com/docs/sourcecontrol/overview#_commit).\nThen press the **Commit to 'main'** button, select **Continue**, and voila,\nyou should now have a running pipeline.\n\n\n6) Examine the pipeline and make sure the jobs are running properly. Go back\nto your project and click on the\n[pipeline](https://docs.gitlab.com/ee/ci/pipelines/) icon. 
You can see the\nthe four jobs we created have run.\n\n\n![Four jobs have\nrun](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/gitlab_jobs.png)\n\n\n7) Click on the **Unit** job and you can see that the unit tests were run\nsuccessfully.\n\n\n```bash\n\n$ pytest tests/\n\n============================= test session starts\n==============================\n\nplatform linux -- Python 3.10.11, pytest-7.3.1, pluggy-1.0.0\n\nrootdir: /builds/awkwardferny/reddit-sentiment-analyzer\n\ncollected 2 items\n\ntests/test_scraper.py ..                                                \n[100%]\n\n============================== 2 passed in 0.09s\n===============================\n\nCleaning up project directory and file based variables\n\n00:00\n\nJob succeeded\n\n```\n\n\nAnd that's how simple it is to migrate a project over from GitHub to GitLab!\n\n\n## What other platforms can GitLab import from?\n\nThe GitLab importer allows one-click migration from several other platforms.\nThese platforms include:\n\n* [Bitbucket\nCloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n\n* [Bitbucket Server\n(Stash)](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n\n* [FogBugz](https://docs.gitlab.com/ee/user/project/import/fogbugz.html)\n\n* [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html)\n\n* [Repository by\nURL](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html)\n\n* [Uploading a manifest file\n(AOSP)](https://docs.gitlab.com/ee/user/project/import/manifest.html)\n\n* [Jira (issues\nonly)](https://docs.gitlab.com/ee/user/project/import/jira.html)\n\n\nWe also have documentation covering how to migrate from these platforms:\n\n*\n[SVN](https://docs.gitlab.com/ee/user/project/import/#import-from-subversion)\n\n* [ClearCase](https://docs.gitlab.com/ee/user/project/import/clearcase.html)\n\n* [CVS](https://docs.gitlab.com/ee/user/project/import/cvs.html)\n\n* 
[Perforce](https://docs.gitlab.com/ee/user/project/import/perforce.html)\n\n* [TFVC](https://docs.gitlab.com/ee/user/project/import/tfvc.html)\n\n\n---\n\n\nThanks for reading! Now you know how easy it is to migrate from GitHub over\nto GitLab. For more information on GitLab\n\nand migrating from GitHub, follow the links below:\n\n\n* [GitHub-to-GitLab project migration\ndocumentation](https://docs.gitlab.com/ee/user/project/import/github.html)\n\n* [Available project\nimporters](https://docs.gitlab.com/ee/user/project/import/#available-project-importers)\n\n* [GitHub-to-GitLab migration video](https://youtu.be/0Id5oMl1Kqs)\n\n\nAlso, read how GitLab has been named a leader in the DevOps platforms space\nby\n[Gartner](https://about.gitlab.com/blog/gitlab-leader-gartner-magic-quadrant-devops-platforms/)\nand the integrated software delivery platforms space by\n[Forrester](https://about.gitlab.com/blog/gitlab-leader-forrester-wave-integrated-software-delivery-platforms/).\n\n\n_Cover image by [Julia\nCraice](https://unsplash.com/@jcraice?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,917,696,495],{"slug":2487,"featured":6,"template":700},"github-to-gitlab-migration-made-easy","content:en-us:blog:github-to-gitlab-migration-made-easy.yml","Github To Gitlab Migration Made Easy","en-us/blog/github-to-gitlab-migration-made-easy.yml","en-us/blog/github-to-gitlab-migration-made-easy",{"_path":2493,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2494,"content":2500,"config":2505,"_id":2507,"_type":14,"title":2508,"_source":16,"_file":2509,"_stem":2510,"_extension":19},"/en-us/blog/gitlab-15-the-retrospective",{"title":2495,"description":2496,"ogTitle":2495,"ogDescription":2496,"noIndex":6,"ogImage":2497,"ogUrl":2498,"ogSiteName":685,"ogType":686,"canonicalUrls":2498,"schema":2499},"GitLab 15: The retrospective","GitLab 
was founded in 2011 but that was a world nearly unrecognizable today. Here's a look back at what life was like then.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667845/Blog/Hero%20Images/gl15.jpg","https://about.gitlab.com/blog/gitlab-15-the-retrospective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab 15: The retrospective\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-06-13\",\n      }",{"title":2495,"description":2496,"authors":2501,"heroImage":2497,"date":2502,"body":2503,"category":741,"tags":2504},[1384],"2022-06-13","\nNo cloud native, no containers, and no remote work: Those were just a few of the things _missing_ from the technology landscape in 2011 when we launched GitLab 1.0. It’s been a journey, for sure. Here’s a look back at how far we’ve traveled to get to GitLab 15.\n\n## It started with source code management\n\nIn the beginning of GitLab there was source code management (SCM)... and that was it. Continuous integration (CI) became part of GitLab because our co-founder Dmitriy Zaporozhets got tired of having to keep the CI servers running separately, so we decided to bring continuous integration into the mix. Even then we knew it didn’t make sense for companies to “DIY” critical parts of their process. That being said, it did feel counterintuitive to bring SCM and CI together, but we tried it anyway. Continuous delivery (CD) eventually evolved out of the CI/SCM integration, but it is crazy to think that when we started GitLab, CI/CD was not really a consideration.\n\n## DIY DevOps really did exist\n\nWhat people were talking about, though, was DevOps, and specifically DIY DevOps because back then it was completely normal for teams to assemble a bunch of tools and call it done. 
When we would talk about the importance of fewer tools and more integration, people would turn up their noses. We heard a lot of “different tools for different things” and “many have sharp tools.” Today we know that a DevOps platform increases development speed and release cadences. But back then, gluing together tools was seen as normal.\n\n## What’s old is new again\n\nBack in the day there were lots of tools and also very different programming languages than we reach for today. In the 2014 era, developers often wrote code in Ruby or JavaScript, and kept things layers away from the microprocessor. Over the years, that’s changed drastically. [Rust](/blog/secure-rust-development-with-gitlab/) and Go – as just two examples – have brought us back to the processor and reflect today’s modern programming styles. It’s another sign of how drastically things have shifted over time.\n\n## It wasn’t cloud-y\n\nThe cloud was in its infancy when GitLab started and at the time we all thought it was probably a great solution for startups or small businesses, but perhaps not something that would ever be in widespread use. Fast-forward to today where most companies run their infrastructures in the cloud. Now it’s widely accepted a cloud native architecture helps teams deliver better software faster and cloud skepticism has drifted away.\n\n## Security was siloed\n\nSecurity teams, and tools, were completely separate entities when GitLab began and that, of course, made doing something inherently difficult even more so. Devs were asked to fix bugs without any context, process, or knowledge of deployment status, and naturally weren’t very excited about it all. Realizing this, we began slowly adding scans to our CI/CD steps so that security was part of the pipeline and not separate from it. The goal is to let developers and teams deal with security in an incremental way, rather than a large to-do list at the end of the process. 
And that [progress is ongoing](/blog/one-devops-platform-can-help-you-achieve-devsecops/).\n\n## Code review wasn’t integrated\n\nEleven years ago, code review wasn’t that different from security, i.e., it was something done in a distant time and place and without context. Today, merge requests are the hub of all the reviews, including code, security, and compliance, and the concept of “review” is firmly  embedded in the process. Code review itself is now getting a boost from machine learning (ML) with “suggested reviewer,” [a feature we’ve added in beta](/blog/unreview-a-year-later-how-gitlab-is-being-transformed-by-ml-powered-code-review/) at the time of this writing but will be coming to all of GitLab at some point during the 15.x releases.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Open source\n\nIt’s fair to say the open source community is stronger and more visible today than it was 11 years ago. GitLab came from the open source community and we continue to proudly define our company and product as open source. Through the years, we’ve tried to keep the open source enthusiasm going by creating an environment where [customers can and do contribute regularly](/blog/how-you-contribute-to-gitlabs-open-devops-platform/) to our product. We want to continue to preserve GitLab as an open source project as well as our community and the company that sustains it all.\n\n## It’s remotely possible\n\nAnd we can’t have a comprehensive retrospective without looking at the concept of remote work. It was practically unheard of in 2011 and, though it’s been normalized today, we spent a long time taking this journey alone. 
So today’s reality – that [successful asynchronous work](/blog/five-ways-to-scale-remote-work/) means having a platform to enable it – is especially satisfying for us.\n",[721,9,697],{"slug":2506,"featured":6,"template":700},"gitlab-15-the-retrospective","content:en-us:blog:gitlab-15-the-retrospective.yml","Gitlab 15 The Retrospective","en-us/blog/gitlab-15-the-retrospective.yml","en-us/blog/gitlab-15-the-retrospective",{"_path":2512,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2513,"content":2519,"config":2526,"_id":2528,"_type":14,"title":2529,"_source":16,"_file":2530,"_stem":2531,"_extension":19},"/en-us/blog/gitlab-ai-cicd-customization-toolkit",{"title":2514,"description":2515,"ogTitle":2514,"ogDescription":2515,"noIndex":6,"ogImage":2516,"ogUrl":2517,"ogSiteName":685,"ogType":686,"canonicalUrls":2517,"schema":2518},"GitLab AI, CI/CD and customization for secure scaled growth","Find out how the latest developments for the GitLab AI-powered DevSecOps Platform help organizations scale to enterprise levels.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679194/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/gitlab-ai-cicd-customization-toolkit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Drive secure growth at scale: Your GitLab AI, CI/CD, and customization toolkit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Flouton\"}],\n        \"datePublished\": \"2023-10-31\",\n      }",{"title":2520,"description":2515,"authors":2521,"heroImage":2516,"date":2523,"body":2524,"category":849,"tags":2525},"Drive secure growth at scale: Your GitLab AI, CI/CD, and customization toolkit",[2522],"Mike Flouton","2023-10-31","\nScaling up to enterprise-level intensifies the demand for rapid, secure software delivery. 
Large organizations can easily fall into the trap of single-function silos, making collaboration tricky and slowing development. Over the past few months, we've introduced new capabilities for the GitLab AI-powered DevSecOps Platform to help teams address these hurdles, accelerate innovation, ensure compliance, and fortify their digital defenses.\n- [AI capabilities that reshape speed and security](#ai-capabilities-that-reshape-speed-and-security)\n- [A single, enterprise-ready DevSecOps platform](#a-single-enterprise-ready-devsecops-platform) \n- [A customizable solution that fits the way you work](#a-customizable-solution-that-fits-the-way-you-work)\n\nLet’s take a closer look at what we've been working on and how these advancements benefit growing organizations.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. [Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n## AI capabilities that reshape speed and security\nAI will transform the way organizations develop software. Our [State of AI in Software Development](https://about.gitlab.com/developer-survey/#ai) report, released earlier this year, demonstrates this: 83% of DevSecOps professionals surveyed said implementing AI in their software development processes is essential to avoid falling behind competitors. \n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) is a powerful set of AI capabilities within GitLab’s DevSecOps Platform that helps to speed up development of code, improve operations, and secure software. Since its debut in June, we’ve been steadily expanding the suite of AI capabilities. These now extend across the entire software development lifecycle – from suggesting code, to finding and explaining vulnerabilities in code, to identifying appropriate code reviewers. 
As enterprises increase code generation, they can avoid potential bottlenecks, such as security checks, further downstream.\n\nFor example, we recently released our [GitLab Duo Vulnerability Explanation feature into Beta](https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai/). Typically, vulnerability discovery and mitigation would require a significant amount of back-and-forth between development and application security teams to agree on severity levels and approaches to fix the vulnerability. Vulnerability Explanation alleviates this inefficiency by summarizing detected vulnerabilities and their implications as well as providing in-depth solutions and suggested mitigation within the developer’s workflow, enabling faster resolution and creation of safer code within the development workflow. \n\n![GitLab Duo Vulnerability Explanation](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n\nFor even more efficiency, [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) (Beta) helps developers create new code and update existing code faster. [GitLab Duo Suggested Reviewers](https://about.gitlab.com/blog/gitlab-suggested-reviewers/) (generally available to all users) helps teams make an informed decision when choosing reviewers that can meet their review criteria.\n\nLearn about [all GitLab Duo capabilities](https://about.gitlab.com/gitlab-duo/).\n\nWatch GitLab Duo capabilities in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LifJdU3Qagw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## A single, enterprise-ready DevSecOps platform \nEnterprise needs from a software delivery platform are unique. 
A DevSecOps platform must support the ability to:\n- build for speed with adequate security guardrails right from the start\n- consolidate to a single platform, but still integrate with your existing solution\n- simply adopt and onboard developers, but handle the complexity of scale\n\nGitLab CI/CD is a core way for organizations to meet these requirements. As customers scale their adoption of GitLab, they run millions of CI/CD jobs on a monthly basis. With the efficiency improvements further driven by GitLab Duo, these numbers will likely increase. However, organizations will need to find efficiency opportunities throughout their development and deployment workflows to be able to handle this growth, ensuring that whatever is deploying into production meets their quality, security, and reliability standards.\n\nThe [GitLab CI/CD Component Catalog](https://about.gitlab.com/blog/introducing-ci-components/), which will soon be released into Beta, solves these problems by enabling organizations to standardize their pipelines and create building blocks in a centralized repository that can be easily discovered, reused, and shared across teams. Enterprises can develop base pipeline configurations with the proper compliance, quality, and security checks already built-in for use across their organization. 
\n\nHere are some more capabilities aimed at improving the enterprise platform experience:\n- The GitLab Runner ecosystem continues to expand as we've recently introduced [GitLab SaaS runners on MacOS](https://about.gitlab.com/releases/2023/09/22/gitlab-16-4-released/#macos-13-ventura-image-for-saas-runners-on-macos), [xlarge and 2xlarge SaaS Runners on Linux](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#more-powerful-gitlab-saas-runners-on-linux), [increased storage on medium and large SaaS Runners on Linux](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/#increased-storage-for-gitlab-saas-runners-on-linux), and [GPU-enabled SaaS Runners on Linux](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#gpu-enabled-saas-runners-on-linux) for supporting data science workloads.\n- GitLab Duo, which was previously only available for GitLab SaaS, is now extended to GitLab self-hosted. Enterprises that prefer to self-host or must self-host due to compliance and regulatory restrictions can now take advantage of our AI features, starting with [Code Suggestions](https://about.gitlab.com/blog/self-managed-support-for-code-suggestions/).\n- Organizations looking at using GitLab Packages as their consolidated package registry can now [import packages](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) from their current package registries like Maven Central or Artifactory. GitLab [supports importing](https://docs.gitlab.com/ee/user/packages/package_registry/supported_functionality.html#importing-packages-from-other-repositories) Maven, npm, NuGet, and PyPI package types into GitLab, with many more package formats to follow. 
\n\n## A customizable solution that fits the way you work\nAs companies grow, there is an increasing need to personalize development and deployment settings and provide distinct visibility into the DevSecOps lifecycle to users beyond the immediate DevSecOps teams. GitLab is designed to function effectively with minimal adjustments, yet it offers the flexibility to be tailored to the requirements of expanding organizations. \n\nOur recent developments, including [changes to product navigation](https://about.gitlab.com/blog/navigation-research-blog-post/), are driven by comprehensive user research. We recognize that each organization and its individual users have unique, preferred workflows. Our updated navigation features, such as pinning frequently accessed items, visualizing work, and simplifying navigation through fewer top-level items, empower DevSecOps teams to align the platform with their optimal environment and workflow.\n\nWatch the new and simplified navigation in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/rGTl9_HIpbY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\nHere are some other highlights:\n- In addition to overhauling the navigation, we [introduced the rich text editor](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#all-new-rich-text-editor-experience) by providing a “what you see is what you get” editing experience. The rich text editor is now available in all issues, epics, and merge requests.\n- GitLab offers [six out-of-the-box roles](https://docs.gitlab.com/ee/user/permissions.html#roles), but for many enterprises this was not enough. Some roles gave too much permission, while others didn’t grant enough permissions to complete a task. 
Enterprises needed a way to define their own roles – leading to [customizable roles](https://docs.gitlab.com/ee/user/custom_roles.html), which gives GitLab administrators the ability to define roles with granular permissions suited for their needs.\n- GitLab Value Streams Dashboard ensures that all stakeholders have visibility into the progress and value delivery metrics associated with software development and delivery. To align with customers’ needs to customize the data viewed and the appearance, we introduced [new velocity metrics](https://about.gitlab.com/releases/2023/08/22/gitlab-16-3-released/#new-velocity-metrics-in-the-value-streams-dashboard) and the ability to [customize the appearance and data](https://about.gitlab.com/releases/2023/07/22/gitlab-16-2-released/#new-customization-layer-for-the-value-streams-dashboard) to adjust metrics based on their areas of interest, filter out irrelevant information, and focus on the data that is most relevant to their analysis or decision-making process.\n\n![New velocity metrics in the Value Streams Dashboard](https://about.gitlab.com/images/16_3/16.3_vsd.mr_iss.png)\n\n\n## The enterprise awaits — get growing today\t\nOrganizations on a growth trajectory need a way to sustain that growth. They'll need to leverage the capabilities of AI to generate code faster — but they can't sacrifice quality or security. Organizations will also need to set standards for development and deployment that extend across the enterprise, and every user will need a clear and customizable view of the DevSecOps lifecycle. As we bring new capabilities into the GitLab DevSecOps Platform, we will continue to support these enterprise-class needs.\n\n> Bring the best practices of industry leaders to your team. Join GitLab and Nasdaq for an exciting discussion about AI, DevSecOps, and developer productivity. 
[Register for this webinar today!](https://page.gitlab.com/webcast-fy24q3-devsecops-ai-developer-productivity.html)\n\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n",[693,9,851,695],{"slug":2527,"featured":6,"template":700},"gitlab-ai-cicd-customization-toolkit","content:en-us:blog:gitlab-ai-cicd-customization-toolkit.yml","Gitlab Ai Cicd Customization Toolkit","en-us/blog/gitlab-ai-cicd-customization-toolkit.yml","en-us/blog/gitlab-ai-cicd-customization-toolkit",{"_path":2533,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2534,"content":2539,"config":2544,"_id":2546,"_type":14,"title":2547,"_source":16,"_file":2548,"_stem":2549,"_extension":19},"/en-us/blog/gitlab-and-redhat-automation",{"title":2535,"description":2536,"ogTitle":2535,"ogDescription":2536,"noIndex":6,"ogImage":1200,"ogUrl":2537,"ogSiteName":685,"ogType":686,"canonicalUrls":2537,"schema":2538},"GitLab and Red Hat: Automation to enhance secure software development","How our closer relationship with Red Hat will boost deployment automation.","https://about.gitlab.com/blog/gitlab-and-redhat-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Red Hat: Automation to enhance secure software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-04-29\",\n      
}",{"title":2535,"description":2536,"authors":2540,"heroImage":1200,"date":1205,"body":2542,"category":1062,"tags":2543},[2541],"Vick Kelkar","\n\nWe're working towards a closer relationship with Red Hat and we're excited about the possibilities. We think developers can reduce time spent coding while still increase productivity with technologies from GitLab and Red Hat. Here's what you need to know.\n\n### Why GitLab?\n\nGitLab enables both the developers and operations teams to apply [DevOps](/topics/devops/) practices using a single application. Using one tool for the entire application’s lifecycle, i.e. right from development and deployment to operations, allows the organization to achieve operational efficiency and reduce deployment cycle times.\n\nGitLab not only provides source code management ([SCM](/solutions/source-code-management/)) but it also offers CI/CD to make streamlined deployments to a container platform like Red Hat OpenShift while maintaining visibility into the deployment pipelines. Furthermore, with [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/), the GitLab application also addresses the organization’s security requirements through scanning and dependency mapping for the developed application. The ability to check the license of software being used, before deploying it in a production environment, helps organizations reduce their [compliance risks](/solutions/compliance/).\n\n### Why GitLab with Red Hat?\n\nRed Hat has a number of technologies in its portfolio. At the core is Red Hat Enterprise Linux ([RHEL](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)), an enterprise-grade Linux operating system (OS) platform used by many Fortune 500 companies that can be deployed across the hybrid cloud, from bare-metal and virtual servers to private and public cloud environments. 
RHEL makes it easier for the operations team to manage the upgrades, security patches and life cycles of servers being used to run applications like GitLab. Red Hat also provides the industry’s most comprehensive enterprise Kubernetes platform in Red Hat OpenShift. OpenShift is uniquely positioned to run a containerized application on a public or private cloud.\n\nGitLab can accelerate software development and deployment of applications while RHEL can act as the more secure, fully managed OS that can scale with the application. The inclusion of new DevOps tools in Red Hat’s hybrid cloud technologies like [service mesh](https://www.openshift.com/blog/red-hat-openshift-service-mesh-is-now-available-what-you-should-know) empowers developers to iterate faster on a foundation of trusted enterprise Linux.\n\nThe GitLab solution, which includes [CI/CD workflow](/topics/ci-cd/), an AutoDevOps workflow, a container registry, and Kubernetes integration can be deployed on RHEL using [install](/install/) instructions and you can find out more about GitLab SaaS pricing model [here](/pricing/#gitlab-com). You can read our sales [FAQ](/sales/#faq) or contact our [sales team](/sales/) if you have questions about the offering.\n\nGitLab can be deployed on RHEL-based machines to provide organizations with DevOps infrastructure and collaboration tools. Our collaboration with Red Hat doesn't stop as a supported platform for the GitLab Server but Red Hat OpenShift can also be a target for our CI/CD and Auto DevOps workflows. Application container images can be pushed to our registry and used to deploy applications into Red Hat OpenShift.\n\n### What’s Next?\n\nAs GitLab and Red Hat increase their collaboration, we plan to announce the availability of GitLab Runner Operator for OpenShift in the near future. 
At GitLab, we have an [engineering epic](https://gitlab.com/groups/gitlab-org/-/epics/2068) underway to develop first-class support for OpenShift.\n\nWith the upcoming product integrations with Red Hat, GitLab is striving to increase collaboration in the organization, increase developer velocity and reduce friction between teams, regardless of the deployment models of VMs or containers. The overarching goal is to help organizations improve their [DevSecOps](/solutions/security-compliance/) posture while significantly reducing security and compliance risks.\n\n### Resources\n\n- [GitOps:The Future of Infrastructure Automation - A panel discussion with Weaveworks, HashiCorp, Red Hat, and GitLab](https://about.gitlab.com/why/gitops-infrastructure-automation/)\n- [RHEL 8 Install documentation](https://about.gitlab.com/install/#centos-8)\n- [and RHEL 7 Install documentation](https://about.gitlab.com/install/#centos-7)\n- [GitLab on Microsoft Azure](https://docs.gitlab.com/ee/install/azure/)\n- [Try OpenShift](https://www.openshift.com/try)\n",[999,9,830,721,1228],{"slug":2545,"featured":6,"template":700},"gitlab-and-redhat-automation","content:en-us:blog:gitlab-and-redhat-automation.yml","Gitlab And Redhat Automation","en-us/blog/gitlab-and-redhat-automation.yml","en-us/blog/gitlab-and-redhat-automation",{"_path":2551,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2552,"content":2557,"config":2563,"_id":2565,"_type":14,"title":2566,"_source":16,"_file":2567,"_stem":2568,"_extension":19},"/en-us/blog/gitlab-arm-aws-graviton2-solution",{"title":2553,"description":2554,"ogTitle":2553,"ogDescription":2554,"noIndex":6,"ogImage":1200,"ogUrl":2555,"ogSiteName":685,"ogType":686,"canonicalUrls":2555,"schema":2556},"Announcing 32/64-bit Arm Runner Support for AWS Graviton2","GitLab enables CI/CD solution on Arm-based AWS Graviton2 instances.","https://about.gitlab.com/blog/gitlab-arm-aws-graviton2-solution","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing 32/64-bit Arm Runner Support for AWS Graviton2\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kushal Koolwal\"}],\n        \"datePublished\": \"2020-05-15\",\n      }",{"title":2553,"description":2554,"authors":2558,"heroImage":1200,"date":2560,"body":2561,"category":1062,"tags":2562},[2559],"Kushal Koolwal","2020-05-15","\n\n_Kushal Koolwal is senior manager, Software Ecosystem Development at Arm Inc._\n\nAt Arm TechCon 2019, GitLab and Arm [announced](/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm/) a joint partnership with the goal of providing first class citizen support for Arm architecture starting with [GitLab’s CI/CD tool](/topics/ci-cd/).\n\n\"Arm is on a mission to make cloud-native developers’ experience frictionless by building out the software stack and enabling a complete set of developer tools,\" says [Pete Goldberg](/company/team/#pete_goldberg), director of Partnerships, GitLab. \"Amazon Web Services (AWS) is the first major cloud provider to build and deploy Arm-powered compute instances. 
GitLab is proud to be Arm’s CI/CD solution, enabling DevOps to seamlessly certify new and existing applications in production environments hosted on AWS Graviton2.\"\n\n### GitLab and Arm announcement and partnership enhancements\n\nToday, the partnership achieved another major milestone in its partnership efforts with the delivery of official support for 32-bit and 64-bit Arm-based GitLab runners in binary, rpm/deb packaging, and Docker image format.\n\n#### This milestone highlights the following enhancements:\n\n\n*   Arm [Runner binaries](https://gitlab-runner-downloads.s3.amazonaws.com/latest/index.html) were made available in the 12.6 release as part of the Graviton2 launch at [AWS re:Invent 2019](/blog/updates-from-aws-reinvent/), allowing developers to start immediately in their custom environments.\n*   [RPM/DEB packages](https://packages.gitlab.com/runner/gitlab-runner) for easier install/upgrade in 12.9 release.\n*   Native Arm [Docker image](https://hub.docker.com/r/gitlab/gitlab-runner/tags) in 13.0 release for container-based environments.\n\n#### As a testament to the strength of the partnership, GitLab has:\n\n*   [Released a demo showing how to deploy and AWS Graviton2 M6g Instance](https://youtu.be/0dntra12w6w)\n*   [Joined](https://developer.arm.com/solutions/infrastructure/developer-resources/ci-cd/gitlab) the [Arm Neoverse developer program ](https://developer.arm.com/solutions/infrastructure/developer-resources/ci-cd/gitlab)\n*   Adding support for Arm architectures for [Auto DevOps](https://gitlab.com/gitlab-org/gitlab/-/issues/214552) and [Omnibus](https://gitlab.com/gitlab-org/omnibus-gitlab/issues/1625)\n\nLearn more about EC2 M6g Instances, powered by AWS Graviton2, [here](https://aws.amazon.com/blogs/aws/new-m6g-ec2-instances-powered-by-arm-based-aws-graviton2/).\n",[232,9,896],{"slug":2564,"featured":6,"template":700},"gitlab-arm-aws-graviton2-solution","content:en-us:blog:gitlab-arm-aws-graviton2-solution.yml","Gitlab Arm Aws Graviton2 
Solution","en-us/blog/gitlab-arm-aws-graviton2-solution.yml","en-us/blog/gitlab-arm-aws-graviton2-solution",{"_path":2570,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2571,"content":2577,"config":2582,"_id":2584,"_type":14,"title":2585,"_source":16,"_file":2586,"_stem":2587,"_extension":19},"/en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"title":2572,"description":2573,"ogTitle":2572,"ogDescription":2573,"noIndex":6,"ogImage":2574,"ogUrl":2575,"ogSiteName":685,"ogType":686,"canonicalUrls":2575,"schema":2576},"GitLab CI/CD is for multi-cloud","Can cloud providers (and their tools) ever be cloud agnostic? We discuss GitHub Actions and GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678401/Blog/Hero%20Images/gitlab-for-multicloud.jpg","https://about.gitlab.com/blog/gitlab-ci-cd-is-for-multi-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD is for multi-cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-06\",\n      }",{"title":2572,"description":2573,"authors":2578,"heroImage":2574,"date":2579,"body":2580,"category":1040,"tags":2581},[715],"2019-11-06","\nAs organizations continue to go all-in on cloud-first strategies, optimizing their cloud architectures is becoming a top priority. It’s estimated that investments in infrastructure to support cloud computing account for [more than a third of all IT spending](https://www.zdnet.com/article/top-cloud-providers-2019-aws-microsoft-azure-google-cloud-ibm-makes-hybrid-move-salesforce-dominates-saas/). 
Using multiple cloud providers with multiple cloud services requires an architecture that enables workflow portability, and organizations will need an unbiased, multi-cloud strategy to make that a reality.\n\n## What is multi-cloud?\n\nMulti-cloud describes [how enterprises use multiple cloud providers to meet different technical or business requirements](https://www.zdnet.com/article/multicloud-everything-you-need-to-know-about-the-biggest-trend-in-cloud-computing/). At its core, multi-cloud is made possible through cloud-native applications built from containers using services from different cloud providers. It allows for multiple services to be managed in one architecture. [85% of enterprises currently operate in multiple clouds](https://www.ibm.com/blogs/cloud-computing/2018/10/19/survey-multicloud-management-tools/), but just because an organization uses multiple cloud providers doesn’t necessarily mean they are multi-cloud.\n\nBeing dependent on one cloud provider can limit the flexibility of an organization and leave it susceptible to vendor lock-in. Workflow portability is one of the benefits of multi-cloud and it enables a seamless workflow, regardless of _where_ you deploy.\n\nIn addition to workflow portability, there are several reasons why most businesses have adopted multi-cloud, and why more will continue to use this approach:\n\n*   **Greater flexibility**: Each cloud vendor shines in some areas and is weak in others. 
Using multiple vendors lets you use the right tool for the job.\n*   **Better acquisitions**: Whether an organization wants to grow through acquisitions (or be acquired itself), existing systems can work within another company’s infrastructure, even if both are using separate cloud providers.\n*   **Increased resilience**: Architecting failover between multiple cloud providers lets you stay up even if one of your vendors is down.\n*   **Improved cloud negotiations**: If another cloud vendor offers better terms or significant credits, businesses can have better leverage because their [DevOps processes](/topics/devops/) are not tied to vendor-specific services.\n*   **Fewer conflicts of interest**: With cloud service providers offering so many different services, you’re less likely to find yourself [in conflict with customers competing in those same spaces](https://www.cnbc.com/2017/06/21/wal-mart-is-reportedly-telling-its-tech-vendors-to-leave-amazons-cloud.html).\n\nA multi-cloud strategy allows organizations to use the tools and services that work best for the job, not just tools that work within their cloud environment.\n\n## Can cloud providers really support multi-cloud?\n\nCloud service providers continually compete with each other to provide more services to keep customers in their cloud. The more services you have with one CSP, the less likely you are to migrate those workloads. AWS offers 90 different services, as does GCP. In comparison, [Microsoft lists over 160 services on its Azure product page](https://www.parkmycloud.com/cloud-services-comparison/) and many of them are integrations with other Microsoft products. Cloud service providers want to have more of your business by making you more dependent on their specific services.\n\nEven though most cloud providers claim to support multi-cloud, migrating workloads out of their cloud isn’t in their best interest. 
As cloud computing is a pay-per-use model, it seems unlikely that multi-cloud would be a goal for the large cloud providers.\n\n## Implementing CI/CD in the cloud\n\nIn the [RightScale 2019 State of the Cloud Report](https://info.flexera.com/CM-REPORT-State-of-the-Cloud), 33% of respondents mentioned [implementing CI/CD](/topics/ci-cd/) in the cloud as a top cloud initiative. DevOps processes play a big role in multi-cloud deployments, so if organizations are wanting to build faster and deploy anywhere, CI/CD will be a key factor in that success. Multi-cloud is all about being cloud-agnostic, and your tools should also support that goal.\n\nBut what if your CI/CD comes from a cloud provider?\n\n### GitHub Actions and GitLab CI/CD\n\nIn 2018, [GitHub announced Actions](/blog/github-launch-continuous-integration/) with CI-like functionality built into a single application offering. The industry has shown us in the past year that single application functionality [is becoming a trend](/blog/built-in-ci-cd-version-control-secret/), and GitLab has been a part of that single application message since the beginning. Now that continuous integration has caught up with the importance of single application, we have to examine how both GitHub and GitLab fit into multi-cloud deployments.\n\nIn June 2018 [Microsoft acquired GitHub](/blog/microsoft-acquires-github/), which really affirmed the importance of software developers and modern DevOps. Developer tools have a high capacity for driving cloud usage because once you have your application code hosted, the natural next step is finding a place to deploy it. 
From a strategic standpoint, this acquisition made a lot of sense for Microsoft because they could use [GitHub’s popularity as a source code management tool as a springboard for greater Azure adoption](https://www.techrepublic.com/article/with-github-acquisition-microsoft-wants-to-make-azure-the-default-cloud-for-developers/).\n\nWhen we talk about multi-cloud in the CI/CD conversation, cloud-agnosticism kind of goes out the window when it comes to GitHub Actions. GitHub’s ubiquity in the SCM market means that millions of developers are using that platform, and it’s those users that [made GitHub such an appealing asset for Microsoft](/blog/microsoft-acquires-github/).\n\nGitLab, in comparison, is cloud-independent. When organizations use GitLab CI/CD, there is no conflict of interest in using one cloud provider over another. Being truly cloud-agnostic means that GitLab provides a complete [DevOps platform](/solutions/devops-platform/) that allows teams to have the same productivity metrics, the same governance, regardless of what cloud you use.\n\n“Choosing a cloud provider should depend on the company’s business objectives, it should not be constrained by technology, and GitLab wants to enable every one of our customers to have this freedom,” says [Sid Silbrandij](/company/team/#sytses), co-founder and CEO at GitLab.\n\n## Multi-cloud should mean any cloud\n\nBusinesses want to choose cloud providers for their inherent value and use the services that best meet their needs. In turn, we should expect our DevOps processes to support multi-cloud objectives. 
Partnering with cloud-agnostic vendors provides a consistent workflow across all clouds, and CI/CD will play a big role in the multi-cloud future.\n\nWe’d love for you to watch our webcast _Mastering your CI/CD_ so you can see for yourself how GitLab’s industry-leading CI/CD helps teams build, test, deploy, and monitor code on any cloud.\n\n[Watch the webcast](/competition/github/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Alexandre Chambon](https://unsplash.com/@goodspleen?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,875,721],{"slug":2583,"featured":6,"template":700},"gitlab-ci-cd-is-for-multi-cloud","content:en-us:blog:gitlab-ci-cd-is-for-multi-cloud.yml","Gitlab Ci Cd Is For Multi Cloud","en-us/blog/gitlab-ci-cd-is-for-multi-cloud.yml","en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"_path":2589,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2590,"content":2595,"config":2600,"_id":2602,"_type":14,"title":2603,"_source":16,"_file":2604,"_stem":2605,"_extension":19},"/en-us/blog/gitlab-ci-cd-with-firebase",{"title":2591,"description":2592,"ogTitle":2591,"ogDescription":2592,"noIndex":6,"ogImage":1200,"ogUrl":2593,"ogSiteName":685,"ogType":686,"canonicalUrls":2593,"schema":2594},"How to leverage GitLab CI/CD for Google Firebase","Firebase is a powerful backend-as-a-service tool and when combined with GitLab it can be easy to enable continuous deployment of database, serverless and apps.","https://about.gitlab.com/blog/gitlab-ci-cd-with-firebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab CI/CD for Google Firebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-03-16\",\n      
}",{"title":2591,"description":2592,"authors":2596,"heroImage":1200,"date":2597,"body":2598,"category":718,"tags":2599},[1384],"2020-03-16","\n\nBuilding mobile apps can be painful - especially when it comes to finding a way to provide all the tooling needed to make the application feasible without becoming an expert in many different disciplines. [Firebase from Google](https://firebase.google.com/) aims to take away that burden by providing an app deployment platform and a BaaS or Backend-as-a-Service. While the offerings can vary greatly, most BaaS providers include a database, object storage, push notifications and some sort of hosting package. Firebase goes beyond this and provides user authentication built-in as well as [serverless](/topics/serverless/) functions, telemetry, and Google tools for growth.  \n\nThose tools are very appealing to mobile and even web-app developers and Firebase has been successful in that market with customers including The New York Times, Lyft and Duolingo just to name a few. But even with all of the fantastic BaaS tools Firebase brings to bear on a project, it is critical to have source code management and [CI/CD tools](/topics/ci-cd/) to match. As Firebase configuration for important settings such as database security, serverless functions, and hosting can all be stored “as-code” inside your application’s repository, GitLab paired with Firebase can make for a powerful duo.\n\n## Our app\n\nOur application will be a relatively simple link shortener for use with the domain [labwork.dev](https://labwork.dev). In order to build a link shortener, we’ll need the ability to log users in, a database for storing the links and a way to redirect folks coming with the short links to the longer website. 
Firebase comes with these items packaged together - which should make it relatively painless to stand up (famous last words right?).\n\nI plan on covering the application in more detail in the future, or if you want to jump to the end you can find the [completed project here](https://gitlab.com/brendan-demo/labwork/homepage/). For now, I wanted to at least introduce the architecture plan. I’ll use [Vue.js](https://vuejs.org) for the frontend. Vue.js is a web application that lets users log in using Firebase Authentication. Once logged in, users will have access to a form that allows them to create new short URLs. That form will call a Firebase Function that checks to see if the shortcode requests already exist (or create a random hash if not specified). If the shortcode is unique, the function adds the shortcode and longer URL to the `urls` collection in Firestore and returns okay.  \n\nOnce the shortcode is in the database, I’ll use another cloud function to retrieve the long URL associated with it. Firestore has a great feature that allows you to redirect traffic based on a pattern to a specified function, and I’ll use this so that anything that comes to `/go/{shortcode}` gets magically redirected to the correct long URL.\n\n![Basic Architecture Diagram](https://about.gitlab.com/images/blogimages/firebase_01.png){: .shadow.large.center}\n\n## Add Firebase to the project\n\nOnce we have this architecture finalized, and have built the skeleton of the project and are ready to start deploying and testing, it’s time to add Firebase to our project. Firebase provides a [very helpful CLI tool](https://github.com/firebase/firebase-tools) for getting started here and we’ll use that to begin.\n\nThe first command `firebase init` starts the project initialization process.\n\n![Output of firebase init command](https://about.gitlab.com/images/blogimages/firebase_02.png){: .shadow.large.center}\n\nFrom there, you can select which services you want to use with this project. 
You’ll also be able to decide to create a new Firebase project, or use one you previously created in the [Firebase console](https://console.firebase.google.com/). You also can select where to store the configuration files. I’ll add a folder called `firebase-config` to store all of these files. Now you are able to source control all changes to your Firebase architecture - from indexes to security rules - all in the same repository as your project.\n\n![Firebase config files](https://about.gitlab.com/images/blogimages/firebase_03.png){: .shadow.large.center}\n\nYou can see all of the changes required to add Firebase to the project [in this merge request](https://gitlab.com/brendan-demo/labwork/homepage/-/merge_requests/1).\n\n## Deploy project to Firebase\n\nNow that Firebase is installed in our project folder and configured, we’re ready to deploy for the first time. In order to deploy the Vue.js portion of the project, we first need to build it to production HTML, CSS and Javascript. So before deployment, run the `yarn build` command.  This will output the build to the `dist` folder by default, and I’ve configured Firebase to recognize that directory as the hosting directory in the `firebase.json`.\n\n![Firebase.json example](https://about.gitlab.com/images/blogimages/firebase_04.png){: .shadow.large.center}\n\nOnce the project is built, running a simple `firebase deploy` will deploy ALL of the features of the project to Firebase: the security rules and indexes for Firestore, the Firebase Functions and the Vue.js project to Firebase Hosting.\n\nIf desired, we can also choose to deploy just a particular part of the project with the `--only` flag. 
For example, to only deploy a new version of the functions, we can say \n\n`firebase deploy --only functions`\n\nThis is a feature that we’ll combine with GitLab CI/CD in the next step to make our deployments as efficient as possible.\n\n## Automate deployments with GitLab CI/CD\n\nNow that we have the project deploying, we can automate that deploy process so that we don’t have to be at our computer authenticated to Firebase in order to deploy new changes. The steps to automate the deploy are relatively painless and include: (1) acquire a Firebase API key to use during deployment, (2) setup the `.gitlab-ci.yml` file to install the firebase CLI before running any other steps and (3) issue the deployment commands for each part of the infrastructure depending on the change in a particular commit to the main branch.\n\nFirst, we need an API key so that GitLab CI/CD can authenticate to Firebase and perform the deploy. To get the API key, we can run `firebase login:ci` from the same place we were deploying the application previously. This will provide a key that looks something like `` which we’ll add to GitLab.\n\nWhen you enter `firebase login:ci`, open the URL provided in your browser. That will open a Google authentication page; then log in with your Google account and click `Allow`.  Then return to the terminal and you’ll see the authentication code.\n\n![Output of firebase login:ci command](https://about.gitlab.com/images/blogimages/firebase_05.png){: .shadow.large.center}\n\nOnce you’ve successfully authenticated and obtained the token, go to your project on GitLab and go to Settings -> CI/CD -> Variables. Here’s where we’ll add the token as an environmental variable to be used in our deployment jobs. The key is `FIREBASE_TOKEN` and then the value is the token that was printed to your terminal. 
I’ve made mine both a [protected](https://docs.gitlab.com/ee/ci/variables/#protected-environment-variables) and [masked](https://docs.gitlab.com/ee/ci/variables/#masked-variables) variable. That means the variable will only be exposed to protected branches and if it’s accidentally echoed to the job output, GitLab will hide it from leaking there.\n\n![Variable configuration screen in GitLab](https://about.gitlab.com/images/blogimages/firebase_06.png){: .shadow.large.center}\n\nNow we can start on the configuration for our `.gitlab-ci.yml`.  At the top of the file I’m going to set the default image to be the current node alpine image from Docker hub:\n\n```yaml\nimage: node:12.13.0-alpine\n```\n\nNext, I’ll create a `before_script` that will install the firebase CLI before running any jobs in the file. In the future, I could bundle that CLI into my own custom Docker image to avoid doing this every time, but for now I’ll go with the boring solution.\n\n```yaml\nbefore_script:\n  - npm i -g firebase-tools\n```\n\nFor the build steps, I want to create a separate job for each part of the infrastructure: Firestore, Functions and the Vue app into Firebase Hosting. To do this, I’m going to utilize the [`only:`](https://docs.gitlab.com/ee/ci/yaml/#only--except) feature to only deploy that part of the infrastructure impacted by changes that have been merged to master. For example, we’ll only deploy the Firebase Functions when something changes in the `/functions` directory on the `master` branch\n\n```yaml\ndeploy-functions:\n  stage: deploy\n  script:\n    - cd functions\n    - npm install\n    - cd ..\n    - firebase deploy --only functions --token $FIREBASE_TOKEN\n  only:\n    refs:\n      - master\n    changes:\n      - functions/**/*\n```\nWe’ll repeat this same pattern for both Firestore and the Hosting project, adding the `yarn build` step before deploying hosting each time. 
Once that’s completed, every time a merge request is accepted, GitLab CI/CD will automatically deploy the changes into our live production application. You can view the [completed `.gitlab-ci.yml` here](https://gitlab.com/brendan-demo/labwork/homepage/-/blob/master/.gitlab-ci.yml), or check out the link shortener for yourself (and try and [Rick Roll](https://labwork.dev/go/30201a) your friends at [labwork.dev](https://labwork.dev)).\n",[9,830,232],{"slug":2601,"featured":6,"template":700},"gitlab-ci-cd-with-firebase","content:en-us:blog:gitlab-ci-cd-with-firebase.yml","Gitlab Ci Cd With Firebase","en-us/blog/gitlab-ci-cd-with-firebase.yml","en-us/blog/gitlab-ci-cd-with-firebase",{"_path":2607,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2608,"content":2614,"config":2620,"_id":2622,"_type":14,"title":2623,"_source":16,"_file":2624,"_stem":2625,"_extension":19},"/en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"title":2609,"description":2610,"ogTitle":2609,"ogDescription":2610,"noIndex":6,"ogImage":2611,"ogUrl":2612,"ogSiteName":685,"ogType":686,"canonicalUrls":2612,"schema":2613},"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less","Install GitLab's Runner on GKE in a few simple steps and get started with GitLab CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667003/Blog/Hero%20Images/gke_in_15_cover_2.jpg","https://about.gitlab.com/blog/gitlab-ci-on-google-kubernetes-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":2609,"description":2610,"authors":2615,"heroImage":2611,"date":2616,"body":2617,"category":718,"tags":2618},[1185],"2020-03-27","If you use [GitLab Self-Managed](/pricing/#self-managed), then getting started 
with GitLab CI using [GitLab's integration with Google Kubernetes Engine (GKE)](/partners/technology-partners/google-cloud-platform/) can be accomplished in a few simple steps. We have several blog posts and documentation that provide detailed [setup instructions for working with Kubernetes clusters](#other-resources). In this post, we highlight the essential steps so that you can get going with GitLab CI/CD in less than 15 minutes.\n\nBy using the GitLab and GKE integration, with one click, you install GitLab Runners on GKE and immediately start running your CI pipelines. Runners are the lightweight agents that execute the CI jobs in your [GitLab CI/CD](/topics/ci-cd/) pipeline.\n\n## Prerequisites:\n\nThe following prerequisites will need to have been configured in order for you to use the built-in GitLab GKE integration:\n- GitLab instance installed and configured with user credentials\n- [Google OAuth2 OmniAuth Provider](https://docs.gitlab.com/ee/integration/google.html) installed and configured on your GitLab instance\n- A Google Cloud project with the following [APIs enabled](https://docs.gitlab.com/ee/integration/google.html#enabling-google-oauth):\n  - Google Kubernetes Engine API\n  - Cloud Resource Manager API\n  - Cloud Billing API\n\n## Get started\n\n![Setup pipeline](https://about.gitlab.com/images/blogimages/ci-gke-in-15/gke_in_15_pipeline.png){: .shadow.medium.center}\n\n### Step 1\n\nWe’re going to add a shared runner at the instance level. 
First, as an administrator, click the “Admin Area” icon\n\n![Runner setup step 1](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_001.png){: .shadow.medium.center}\n\nThen on the left menu, select “Kubernetes”\n\n![Runner setup step 2](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_002.png){: .shadow.medium.center}\n\n### Step 2\n\nClick the green “Add Kubernetes cluster” button.\n\n![Runner setup step 3](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_003.png){: .shadow.medium.center}\n\n### Step 3\n\nThe screen to “Add a Kubernetes cluster integration” should come up. Click on the “Google GKE” icon on the right.\n\n![Runner setup step 4](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_004.png){: .shadow.medium.center}\n\n### Step 4\n\nGive your cluster a name, and select a “Google Cloud Platform project” from your linked GCP account. If no projects are populated in the menu then either your Google OAUTH2 integration isn’t configured correctly or your project is missing the needed permissions. Check that these are set up and that the [APIs mentioned in the prerequisites above](#prerequisites) are enabled.\n\nChoose a zone in which to run your cluster. For the purposes of running CI, the number of nodes in your cluster is going to be how many simultaneous jobs you can run at a given time. As we are using the built-in GitLab Google Kubernetes integration, you can set a maximum of four nodes.\nHere we set that to three.\n\nClick “Create Kubernetes Cluster”\n\n![Runner setup step 5](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_005.png){: .shadow.medium.center}\n\nIt takes a few minutes for the cluster to be created. While it’s happening you should see a screen like this. 
You can leave this screen and come back (by going to “Admin Area> Kubernetes > [your cluster name]”)\n\n![Runner setup step 6](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_006.png){: .shadow.medium.center}\n\n### Step 5\n\nOnce the cluster has been created, we need to install two applications. First, install “Helm Tiller” by clicking on the “Install” button next to it.\n\n![Runner setup step 7](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_007.png){: .shadow.medium.center}\n\nThis takes a moment, but should be much quicker than creating the cluster initially was.\n\n![Runner setup step 8](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_008.png){: .shadow.medium.center}\n\n### Step 6\n\nNow that Helm Tiller is installed, more applications can be installed. For this tutorial we only need to install the “GitLab Runner” application. Click the install button next to GitLab Runner.\n\n![Runner setup step 9](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_009.png){: .shadow.medium.center}\n\nAgain, this should go pretty quickly.\n\n![Runner setup step 10](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_010.png){: .shadow.medium.center}\n\nOnce done, the button will change to an “Uninstall” button. You’re now set up with shared runners on your GitLab instance and can run your first CI pipeline!\n\n![Runner setup step 11](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_011.png){: .shadow.medium.center}\n\n### Next steps\n\nNow that you are up and running with GitLab CI/CD on GKE, you can build and run your first GitLab CI/CD pipeline. 
Here are links to a few resources to get you started.\n\n- [Getting Started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)\n- [How to build a CI/CD pipeline in 20 minutes or less](/blog/building-a-cicd-pipeline-in-20-mins/)\n- [Getting started with Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\n\nIf you are planning to manage your own fleet of GitLab Runners, then you may also be thinking about how best to set up autoscaling of GitLab Runners. As we have just set up your first Runner on GKE, then you can review the [GitLab Runner Kubernetes Executor docs](https://docs.gitlab.com/runner/executors/kubernetes.html) for additional details as to how the GitLab Runner uses Kubernetes to run builds on a Kubernetes cluster.\n\n### Other resources\n\n- [Scalable app deployment webcast](https://about.gitlab.com/webcast/scalable-app-deploy/)\n- [Install GitLab on a cloud native environment](https://docs.gitlab.com/charts/)\n- [Adding and removing Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html)\n- [Deploy production-ready GitLab on Google Kubernetes Engine](https://cloud.google.com/solutions/deploying-production-ready-gitlab-on-gke)\n\nCover image by [Agê Barros](https://unsplash.com/photos/rBPOfVqROzY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[232,1228,830,9,2619,939],"GKE",{"slug":2621,"featured":6,"template":700},"gitlab-ci-on-google-kubernetes-engine","content:en-us:blog:gitlab-ci-on-google-kubernetes-engine.yml","Gitlab Ci On Google Kubernetes 
Engine","en-us/blog/gitlab-ci-on-google-kubernetes-engine.yml","en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"_path":2627,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2628,"content":2633,"config":2639,"_id":2641,"_type":14,"title":2642,"_source":16,"_file":2643,"_stem":2644,"_extension":19},"/en-us/blog/gitlab-com-13-4-breaking-changes",{"title":2629,"description":2630,"ogTitle":2629,"ogDescription":2630,"noIndex":6,"ogImage":2088,"ogUrl":2631,"ogSiteName":685,"ogType":686,"canonicalUrls":2631,"schema":2632},"Upcoming Breaking Changes to Secure Analyzers in GitLab 13.4","Our next release, 13.4, will include narrow breaking changes for our Secure scanning features. Find out how this could affect you and what you need to do.","https://about.gitlab.com/blog/gitlab-com-13-4-breaking-changes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Upcoming GitLab.com narrow breaking changes to Secure Analyzers in GitLab 13.4\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2020-08-19\",\n      }",{"title":2634,"description":2630,"authors":2635,"heroImage":2088,"date":976,"body":2637,"category":697,"tags":2638},"Upcoming GitLab.com narrow breaking changes to Secure Analyzers in GitLab 13.4",[2636],"Taylor McCaslin","\n\nWe've spent the first few releases of GitLab 13 with several user-focused improvements to our Static [Application Security Testing (SAST)](/topics/devsecops/) capabilities: \n\n* We made our open-source based SAST analyzers free to use for every GitLab user on all tiers [covering 18 languages/frameworks](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks). \n* We released a new [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) scan type and a managed CI template. 
This also added new capabilities like [full history secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/#full-history-secret-scan). \n\nWith these changes we've modernized and simplified the way our Security scans work, requiring the deprecation and removal of a few configuration options to improve the security, stability, and speed of our analyzers. \n\nWith these removals, there are a few changes that you should make to your Secure scan configurations to ensure you continue enjoying those capabilities. All of these removals were previously announced as deprecations in the past few release blog posts.  \n\n**These changes will release to GitLab.com as early as August 27th and will be released to self-managed customers with GitLab 13.4 on September 22.** If you have questions or feedback, you can [let us know in this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/235816).\n\n\n## Removal of Secret Detection Job in SAST CI Template (High Impact)\n\nSince [GitLab 13.1](/releases/2020/06/22/gitlab-13-1-released/#deprecation-of-secret-detection-job-in-sast-configuration), the [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) CI/CD configuration settings moved to a separate GitLab-provided template and run as a new Secure scan type. This new Secret Detection template is also now [included in Auto DevOps](https://docs.gitlab.com/ee/user/application_security/#security-scanning-with-auto-devops). \n\nIn 13.4 we will remove the [old SAST `secrets-sast` job definition](https://gitlab.com/gitlab-org/gitlab/-/blob/67e235bd5826c160db47bbb8c0dc87e6b9cd7b43/lib/gitlab/ci/templates/Security/SAST.gitlab-ci.yml#L171) and if you have not switched to the [new Secret Detection template](https://docs.gitlab.com/ee/user/application_security/secret_detection/#configuration) you will not continue to scan for secrets. 
You can easily transition by adding the new template.\n\nBefore upgrading to GitLab 13.4 we recommend you [add the new Secret Detection template](https://docs.gitlab.com/ee/user/application_security/secret_detection/#configuration) to your `gitlab-ci.yml` file, and then remove the [old SAST `secrets-sast` job definition](https://gitlab.com/gitlab-org/gitlab/-/blob/67e235bd5826c160db47bbb8c0dc87e6b9cd7b43/lib/gitlab/ci/templates/Security/SAST.gitlab-ci.yml#L171) from the [SAST configuration template](https://docs.gitlab.com/ee/user/application_security/sast/#configuration) in `SAST.gitlab-ci.yml` file. We have made a [video to guide you through the process of transitioning](https://www.youtube.com/watch?v=W2tjcQreDwQ&feature=emb_title) to this new template. \n\n- You can follow [this implementation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/234011) for further details.\n- [Initial deprecation announced in 13.1 (6/22)](/releases/2020/06/22/gitlab-13-1-released/#deprecation-of-secret-detection-job-in-sast-configuration)\n\n\n## Removal of DinD (Medium impact)\n\nTo increase the security and reduce complexity of scans, use of Docker-in-Docker (DinD) in GitLab Secure scanners was [deprecated in 13.0](/releases/2020/05/22/gitlab-13-0-released/#deprecation-of-docker-in-docker-(dind)-for-security-scanners) and is **scheduled for removal in 13.4**. GitLab security products started to use non-DinD mode by default in vendor templates in GitLab 13.0. We encourage customers to update their vendor CI templates to use this new behavior. 
If you override or use custom [Secure CI templates](https://gitlab.com/gitlab-org/gitlab-foss/-/tree/master/lib/gitlab/ci/templates/Security), you can follow the guides below to disable Docker in Docker (DinD) from your existing job templates: \n \n* [Disabling Docker in Docker for Dependency Scanning (12.10 Documentation)](https://docs.gitlab.com/12.10/ee/user/application_security/dependency_scanning/index.html#disabling-docker-in-docker-for-dependency-scanning)\n* [Disabling Docker in Docker for SAST (12.10 Documentation)](https://docs.gitlab.com/12.10/ee/user/application_security/sast/#disabling-docker-in-docker-for-sast)\n* [Initial deprecation announced in 13.0 (5/22)](/releases/2020/05/22/gitlab-13-0-released/#deprecation-of-docker-in-docker-(dind)-for-security-scanners)\n\n\n## Transition of Secure Analyzers to Linux Alpine image (Low impact)\n\nTo [simplify and modernize](/direction/secure/static-analysis/sast/#whats-next--why) our [GitLab Secure SAST Analyzers](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks), we will transition the [GitLab Bandit Python Analyzer](https://gitlab.com/gitlab-org/security-products/analyzers/bandit) image from Debian Buster to [Alpine Linux](https://alpinelinux.org/about/). This transition will reduce the image size and increase both the speed and security of our analyzer.\n\nThis transition will be backward incompatible though we expect limited impact. If you use a `before_script` to pre-build dependencies for your Python project, you should test this change before upgrading to GitLab 13.4. 
We will add a new section in the [SAST troubleshooting documentation](https://docs.gitlab.com/ee/user/application_security/sast/#troubleshooting) with more information about this change as we approach 13.4.\n\n- [Initial deprecation announced in 13.2 (7/22)](/releases/2020/07/22/gitlab-13-2-released/#transitioning-gitlab-bandit-secure-analyzer-os-image)\n\n\n## Transition of TSLint Job to ESLint (Low impact)\n\nThe [recent update of our ESLint Secure analyzer](/releases/2020/07/22/gitlab-13-2-released/#javascript--typescript-sast-analyzer-available-for-all) includes new support for TypeScript which is actively maintained. Since 2019 the [TSLint project has been deprecated](https://palantir.github.io/tslint/) in favor of ESLint. We have now unified these analyzers in [GitLab's ESLint analyzer](https://gitlab.com/gitlab-org/security-products/analyzers/eslint), which renders our TSLint analyzer obsolete. \n      \nIn 13.2 we deprecated the [TSLint Secure analyzer](https://gitlab.com/gitlab-org/security-products/analyzers/tslint) and have removed the [TSLint job definition from the SAST template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/SAST.gitlab-ci.yml). If you leverage [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) or `include` the [GitLab Secure SAST Template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/SAST.gitlab-ci.yml) no action is required, as this transition happened automatically when you updated to GitLab 13.2. We recommend that anyone using the TSLint SAST job in a customized CI template migrate to the [newly updated ESLint Job](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Security/SAST.gitlab-ci.yml#L85).\n\nThe next time the SAST job runs after this transition you may see previously present TSLint vulnerabilities being marked as \"resolved\" and new TypeScript vulnerabilities from ESLint. 
This behavior is expected due to the new unique vulnerability signatures from ESLint which are different from old TSLint job scan vulnerability signatures.\n\n- [Initial deprecation announced in 13.2 (7/22)](/releases/2020/07/22/gitlab-13-2-released/#deprecation-and-planned-removal-of-tslint-secure-analyzer)\n\n\n## Looking towards the future\n\nWe are always working to improve the security, efficiency, and quality of our Security scanning tools. These deprecations and removals help us rapidly improve our solution and allow us to deliver on our [Secure product vision](/direction/secure/). We appreciate your understanding of these changes, and if you have questions about these deprecations and removals please [let us know in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/235816).\n",[2097,9,697],{"slug":2640,"featured":6,"template":700},"gitlab-com-13-4-breaking-changes","content:en-us:blog:gitlab-com-13-4-breaking-changes.yml","Gitlab Com 13 4 Breaking Changes","en-us/blog/gitlab-com-13-4-breaking-changes.yml","en-us/blog/gitlab-com-13-4-breaking-changes",{"_path":2646,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2647,"content":2653,"config":2659,"_id":2661,"_type":14,"title":2662,"_source":16,"_file":2663,"_stem":2664,"_extension":19},"/en-us/blog/gitlab-community-day",{"title":2648,"description":2649,"ogTitle":2648,"ogDescription":2649,"noIndex":6,"ogImage":2650,"ogUrl":2651,"ogSiteName":685,"ogType":686,"canonicalUrls":2651,"schema":2652},"Join the first GitLab Community Day and get started with CI","Learn about GitLab CI and get to know the community.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681760/Blog/Hero%20Images/Community-day-banner-1.png","https://about.gitlab.com/blog/gitlab-community-day","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Join the first GitLab Community Day and get started with CI\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"John Coghlan\"}],\n        \"datePublished\": \"2020-11-19\",\n      }",{"title":2648,"description":2649,"authors":2654,"heroImage":2650,"date":2656,"body":2657,"category":978,"tags":2658},[2655],"John Coghlan","2020-11-19","\n\nWe are excited to host our first GitLab Community Day on Dec 1st, 2020. This will be a global event, with sessions at different times to enable everyone in the GitLab community to attend worldwide. For our first Community Day, we will focus on GitLab CI. Come and join us to learn more about CI and meet our community!\n\n## Who should attend\n\nAre you just starting to learn about GitLab or [Continuous Integration](/solutions/continuous-integration/)? If so, these Community Day sessions are a great way to accelerate your learning process.\nIf you already use GitLab CI, we'd love for you to join us to share your knowledge and experience with other community members.\nRegardless of where you are in your learning curve, we invite you to bring your projects and questions and learn in real-time with us.\n\n## What to expect\n\nThe GitLab Developer Evangelism team will walk you through an introduction to CI followed by hands-on exercises. These will show how to get started with GitLab CI and how to add security scanning to your pipelines. By the end of the session, you will have successfully created your own CI pipelines using GitLab.\n\nEveryone who participates in the Community Day events will be eligible for special GitLab swag and prizes.\n\n## Register\n\nWe are offering three sessions to enable our community members from around the world to participate. 
Please select the session that works best for you and register on Meetup.com.\n\n- Session #1 with [Abubakar Siddiq Ango](/company/team/#abuango) at [4:00PM CST](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20201201T16&p1=3910): [RSVP](https://www.meetup.com/gitlab-virtual-meetups/events/274628363/)\n- Session #2 with [Michael Friedrich](/company/team/#dnsmichi) at [4:00PM GMT](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20201201T16&p1=%3A): [RSVP](https://www.meetup.com/gitlab-virtual-meetups/events/274628394/)\n- Session #3 with [Brendan O'Leary](/company/team/#brendan) at [4:00PM PT](https://www.timeanddate.com/worldclock/fixedtime.html?iso=20201201T16&p1=3922): [RSVP](https://www.meetup.com/gitlab-virtual-meetups/events/274628411/)\n\n## Spread the word\n\nPlease help spread the word about our community day by sharing this on social media. Feel free to use the `#GitLabCommunityDay` hashtag so we can amplify your posts!\n",[9,268],{"slug":2660,"featured":6,"template":700},"gitlab-community-day","content:en-us:blog:gitlab-community-day.yml","Gitlab Community Day","en-us/blog/gitlab-community-day.yml","en-us/blog/gitlab-community-day",{"_path":2666,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2667,"content":2673,"config":2679,"_id":2681,"_type":14,"title":2682,"_source":16,"_file":2683,"_stem":2684,"_extension":19},"/en-us/blog/gitlab-flow-duo",{"title":2668,"description":2669,"ogTitle":2668,"ogDescription":2669,"noIndex":6,"ogImage":2670,"ogUrl":2671,"ogSiteName":685,"ogType":686,"canonicalUrls":2671,"schema":2672},"Combine GitLab Flow and GitLab Duo for a workflow powerhouse ","Add the AI-powered capabilities of GitLab Duo to GitLab Flow to boost the efficiency of DevSecOps workflows. 
This is a guide for deployment in your environment, including a video tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662840/Blog/Hero%20Images/ai-experiment-stars.png","https://about.gitlab.com/blog/gitlab-flow-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Combine GitLab Flow and GitLab Duo for a workflow powerhouse \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2023-07-27\",\n      }",{"title":2668,"description":2669,"authors":2674,"heroImage":2670,"date":2675,"body":2676,"category":849,"tags":2677,"updatedDate":2678},[1506],"2023-07-27","Starting out with DevSecOps requires a well-thought-out workflow, but that can sometimes seem like a daunting challenge. Luckily, there are two things that can help: GitLab Flow and GitLab Duo. GitLab Flow is a prescribed approach to help organizations successfully apply DevSecOps processes. GitLab Duo is a [powerful set of AI-powered capabilities](https://about.gitlab.com/blog/supercharge-productivity-with-gitlab-duo/) within the GitLab DevSecOps Platform that can help organizations develop code, improve operations, and secure software more efficiently. Combined, GitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency, which can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability.\nIn this article, we delve into how GitLab Flow and GitLab Duo can be used together to help organizations be successful with DevSecOps.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. 
[Watch today!](https://about.gitlab.com/seventeen/)\n\n## What is GitLab Flow?\nGitLab Flow is a prescribed and opinionated end-to-end workflow for the development lifecycle of applications when using GitLab, an AI-powered DevSecOps platform with a single user interface and a single data model. GitLab Flow is based on best practices and lessons learned from customer feedback and our dogfooding. Furthermore, GitLab Flow spans across the [stages of the DevSecOps lifecycle](https://about.gitlab.com/stages-devops-lifecycle/), forming an efficient workflow with an inner feedback loop for reviewing a specific update and an outer feedback loop for improving the entire application, as well as the development lifecycle itself. \n\n![The GitLab Flow inner and outer loops](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-feedback-loops.png)\n\u003Ccenter>The GitLab Flow inner and outer loops\u003C/center>\u003Cp>\u003C/p>\n\nAs you can see by the many stages in GitLab Flow, there is much more to developing software than writing code. Below, we'll dive into each step of GitLab Flow and how GitLab Duo can help. \n\n### Planning\nThe first portion of GitLab Flow is planning, which sits on the outer feedback loop of GitLab Flow. It encompasses issues, merge requests, epics, milestones, iterations, release, release evidence, and more. Let’s cover what roles these components play in GitLab Flow and how GitLab Duo can help.\n\n![Planning - first portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-planning-portion.png)\n\u003Ccenter>Planning - first portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n#### Issues\nIssues are where product problems or new features are defined and where team members can collaborate. As an issue is created, you can populate its title and then leverage GitLab Duo **Issue description generation** capability to help enrich the description field, saving time and effort. 
Because many stakeholders can participate in comment threads on an issue, **Discussion summary** is an AI-powered capability in GitLab Duo that can summarize hundreds of comments on an issue into a concise paragraph so that a stakeholder can quickly get caught up with the conversation, jump into the discussion, and become productive right away.\n\nIssues can be organized and visualized in issue boards, which are a software project management tool that can be used as kanban or Scrum boards. These boards help teams plan, organize, and visualize a workflow for a feature or product release. Different categories of boards can be created and issues can be moved from one board to another one with a simple drag and drop.\n\n#### Merge requests\nMerge requests are where solutions are developed. As release components, issues and merge requests provide the auditability and tracking of application changes done by stakeholders, such as DevOps and platform engineers, system and database administrators, security engineers, and developers. In addition, issues and merge requests are key inputs for the release planning process.\n\nMerge requests can be individually created or created from an issue. Creating a merge request from an issue automatically relates it to that issue so when the merge request is merged its associated issue is automatically closed. Merge requests can also be manually related to an issue.\n\n![Merged merge request will close issue](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/mr-with-its-issue.png)\n\u003Ccenter>Merged merge request will close issue\u003C/center>\u003Cp>\u003C/p>\n\nLike issues, merge requests can include a long list of updates to a feature branch by many stakeholders. Collaborators who need to familiarize themselves with or understand all of the updates included in a merge request can take advantage of the **Merge request summary** capability in GitLab Duo to quickly get caught up on the changes. 
In addition, collaborators can invoke GitLab Duo **Merge request template population**, which uses a pre-created merge request template and automatically fills in the content for sections in it. Description templates provide a way to standardize and optimize collaboration and communication across the development lifecycle and GitLab Duo speeds this up even more!\n\nIssues with the same theme can be grouped together in an epic to organize the work to be done. Epics can have child issues and sub-epics and/or be linked to epics across the organization. Iterations can be used to track sprints of work, and can be manually scheduled or scheduled automatically using GitLab iteration cadences to streamline planning workflows. In addition, iterations include burndown and burnup charts. Burndown charts help track overall progress towards a project's total scope, while burnup charts track the daily total count and weight of issues added to and completed in a given timebox.\n\n#### Milestones\nTeams can use milestones to organize issues and merge requests into a cohesive group with an optional start date and an optional due date. Milestones are typically used to track releases and can track issues and merge requests at a project level or group level. Similar to iterations, milestones also provide burndown and burnup charts to show progress.\n\nMilestones can be associated with a release, whose automated creation generates many artifacts, including the release evidence. The release evidence is an automatically collected snapshot of data that’s related to the release. In addition to test artifacts and linked milestones, job artifacts can optionally be included in the release evidence, which can facilitate internal processes such as external audits.\n\nEpics, milestones, and iterations can be visualized via the Roadmaps page, which helps track release progress and streamline the release process. 
\n\nOnce the planning takes place, the work towards the resolution of a problem or a new feature can start. This happens in merge requests. Let’s delve deeper into how that happens in GitLab Flow. \n\n> [Learn more by trying GitLab Flow and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F).\n\n### Merge requests and pushing code\n\n![Merge requests and pushing code - second portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-mr-pushing-code-portion.png)\n\u003Ccenter>Merge requests and pushing code - second portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe second portion of GitLab Flow is related to merge requests and pushing code. As mentioned earlier, merge requests are where solutions are developed through collaboration among stakeholders across the organization. This collaboration can happen in a distributed manner and asynchronously. Participants can take advantage of collaborative capabilities, such as tagging, inline suggestions, inline comments, merge request comments, review threads, and review requests, which can help improve code quality, availability, reliability, and performance. Right after the creation of the merge request is the start of the GitLab Flow inner feedback loop, which is where code and fix pushes, test and scan runs, and collaboration and update reviews take place.\n\n#### Pipelines\nAs updates are applied to a feature branch via merge requests, pipelines — if defined — are automatically executed. Pipelines can have multiple stages and jobs to build and test, and then deploy the application or microservice to a review environment. In that review environment, the updates can be dynamically verified before they are merged to the main branch. 
This automation helps streamline the application update and review processes.\n\nIn addition, as DevSecOps teams make updates to the application via merge requests, they have a variety of AI-powered capabilities at their disposal. As they write or update code, GitLab Duo **Code Suggestions** recommends code that should come next and the developer can choose to accept or ignore the recommendation. Code Suggestions support code generation via prompts as well as code completion as you type. Code Suggestions can help improve the programming experience by reducing errors and helping developers write code faster, which can help enhance production code quality. Code Suggestions also can lead to higher developer productivity and faster iterations and rollouts.\n\nAs different stakeholders within the organization participate in the development or review of applications, they may encounter code that is poorly documented, complex or difficult to understand, or is written in a programming language unfamiliar to them. The GitLab Duo **Code explanation** capability explains code in natural language so that everyone can understand the code and get up to speed quickly.\n\nMoreover, when updates are committed to the feature branch, the GitLab Duo **Suggested reviewers** capability uses the changes in a merge request and a project’s contribution graph to suggest appropriate reviewers in the reviewer dropdown in the merge request sidebar. The list includes users that are knowledgeable about a specific aspect of the application and would be the best candidates to review the updates. Developers save time by not having to search and identify adequate reviewers, streamlining the review process and avoiding delays and low-quality reviews.\n\nWhen developers make changes to the code, they often don't include a comment in the merge request about the specific changes they made. 
The GitLab Duo **Merge request summary** capability allows the author of merge request changes to use AI to generate a natural-language comment that summarizes the updates to the code. Reviewers then can better understand the changes and streamline the entire review process.\n\nAs reviewers review updates to the code in a merge request, they can create a review block, which can consist of many comments spanning many source files. To help the original author of the updates better understand the feedback provided by the reviewer in a long review block, the GitLab Duo **Code review summary** capability generates a natural-language summary of the reviewer’s feedback. This enables better handoff between authors and reviewers, streamlining the review process.\n\nFurthermore, when developers add new code via a merge request, they can leverage the GitLab Duo **Test generation** capability to use AI to generate unit tests for the new code. This can help to increase developer productivity, improve test coverage, and catch bugs early in the development lifecycle. Developers can also leverage GitLab Duo **Chat**, which is always accessible, to refactor code and generate in-line documentation, e.g. 
docstrings, for their source code.\n\nWhile pipelines execute on branch updates, they can include automated tests and scans, which helps in shifting security left.\n\n### Shifting security left\n\n![Shifting security left - third portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-shift-sec-left-portion.png)\n\u003Ccenter>Shifting security left - third portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe third portion of GitLab Flow is shifting security left, which is also part of the GitLab Flow inner feedback loop.\n\nIn addition to DevOps and platform engineers, system and database administrators, and developers, some of the stakeholders collaborating in a merge request may be concerned about security and compliance, which is where automated tests and security scans play a role. Scans can be simply included in a pipeline via readily available templates and/or can be automatically executed within a merge request pipeline. GitLab provides a broad set of built-in security scanners and analyzers that can be leveraged by GitLab Flow, but the DevSecOps platform can also accommodate third-party and custom scanners.\n\nGitLab Flow shifts security left in the pipeline to detect and resolve defects as early as possible in the software development process. It is much simpler and cheaper to fix vulnerabilities early in the development cycle than once the application is in production, where an unscheduled outage can affect your users and revenue.\n\nThe built-in security scanners and analyzers provided by GitLab include: unit testing, infrastructure-as-code (IaC) scanning, static application security testing (SAST) scanners, dependency scanning, secret detection, container scanning, API security, web API fuzz testing, and coverage-guided fuzz testing. 
In addition, GitLab provides a variety of security dashboards and reports to manage and visualize vulnerabilities, such as the Dependencies list, Security dashboard, Vulnerability Report, and vulnerability pages.\n\nTo help developers and security engineers better understand and remediate vulnerabilities more efficiently, the GitLab Duo **Vulnerability explanation** capability provides an explanation about a specific vulnerability, how it can be exploited, and, most importantly, a recommendation on how to fix the vulnerability. Developers can also take advantage of GitLab Duo **Vulnerability resolution**, which automatically creates a merge request that includes code changes to fix the vulnerability. These AI-powered capabilities can help streamline and optimize the process of securing and hardening an application to prevent vulnerabilities that can be exploited by cyber attacks in production.\n\nBesides SAST scanners, GitLab provides dynamic application security testing (DAST) scanners, which require a running application. When leveraging these scanners, GitLab is capable of automatically provisioning a DAST environment for the DAST scans and then performing a complete cleanup of all resources post-DAST testing. In addition, for running containers, GitLab provides operational container scanning, which scans container images in your cluster for security vulnerabilities.\n\nThe scans mentioned above can be executed automatically within a merge request pipeline or, in some cases, can be scheduled for execution via scan execution and merge request approval policies. These policies can be defined via the GitLab UI or YAML files and are configured in a separate project, allowing segregation of duties for reusability, maintenance, and management. Scan execution policies require that security scans be run on a specified schedule or with the project pipeline, and merge request approval policies take action based on scan results. 
Security engineers or teams can define these policies to enforce security processes across the organization and GitLab Flow may encounter or leverage these as it spans through its steps.\n\nTo enforce security and compliance across projects in your organization, you can use compliance labels and pipelines. Compliance labels and pipelines can be made mandatory to execute before a project’s own pipeline. With this approach, you can ensure that all teams within your organization meet your security and compliance standards. In addition, you can secure your applications against cyber attacks, conform to government compliance standards, and always be audit-ready.\n\nThe main goal of all of these GitLab Flow security prescriptions is to fix vulnerabilities early in the development cycle rather than once the application is in production, where remediating a vulnerability can prove to be very costly in reputation and revenue.\n\nAs vulnerabilities are mitigated within the GitLab Flow inner feedback loop and more updates are applied to the application in the feature branch, stakeholders need to re-review these updates to ensure that the updates have taken place and no regressions have inadvertently been introduced.\n\n### Continuous review\n\n![Reviews - fourth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-reviewing-features-portion.png)\n\u003Ccenter>Reviews - fourth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nThe next portion of GitLab Flow is reviewing features, which prescribes the continuous review of applications. Reviewing features involves the ability to stand up a review environment to which the interim application (feature branch) is deployed so that stakeholders can review it in real time and provide feedback. The interim application can then be continuously adjusted until it is ready to be merged to the main branch. 
GitLab Flow also prescribes the cleanup of all provisioned review environment resources at the moment when the merge request is merged to the main branch.\n\nThis iterative automated review process is part of the inner feedback loop in GitLab Flow. As mentioned above, within the inner feedback loop, GitLab Duo capabilities like Code explanation, Code Suggestions, Suggested reviewers, Merge request summary, Merge request template population, Code review summary, Vulnerability explanation, Vulnerability resolution, and Root cause analysis are prescribed by GitLab Flow to enable a better handoff between authors and reviewers and streamline the entire review process.\n\nThe GitLab Flow inner feedback loop terminates when all review items are addressed and the merge request is approved and merged to the main branch, which triggers the deployment of the application to production.\n\n### Deploying applications and infrastructure\n\n![Deploying - fifth portion of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/The-GitLab-Flow-2023-deploy-apps-portion.png)\n\u003Ccenter>Deploying - fifth portion of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\nDepending on an organization’s needs, either continuous delivery or continuous deployment is prescribed by GitLab Flow. Whereas continuous delivery is the frequent release of code by triggering the deployments manually (e.g., to production), continuous deployment is the automated release of code (e.g., to production) without human intervention. Let’s cover continuous delivery first.\n\nAs you release your software using continuous delivery, you have a few deployment options. You can establish a freeze window and then deploy using advanced deployment techniques, such as canary, blue/green, timed, and incremental rollouts. Incremental rollouts can lower the risk of production outages delivering a better user experience and customer satisfaction. 
Advanced deployment techniques can also improve development and delivery efficiency, streamlining the release process.\n\nAs you release your software using continuous deployment, all changes/updates go directly to production. Progressive delivery approaches like feature flags, which allow you to separate the delivery of specific features from a launch, are a good way to reduce risk and manage what functionality to make available to production users. Feature flags support multiple programming languages and allow developer experimentation and controlled testing. You can even use feature flags to roll out features to specific users.\n\nAlthough GitLab supports all these deployment approaches, GitLab Flow allows for the adoption of the approach that best fits the organization and/or specific project needs.\n\n### Monitoring applications and DevSecOps processes\nOnce your application has been deployed to production, it needs to be continuously monitored to ensure its stability, performance, and availability. In addition, as the DevSecOps processes execute, they are measured, providing the opportunity to improve their performance and efficiency. The monitoring capabilities are provided by GitLab and, as such, can be leveraged by GitLab Flow.\n\nFor running containers, GitLab provides operational container scanning (OCS), which scans container images in your cluster for security vulnerabilities. These scans can be automated by scheduling them when to run and any found vulnerabilities are automatically displayed in a security dashboard. The OCS can help keep your cluster applications secure and preempt any cyber attacks that can lead to leaks of private data and even cause unexpected outages.\n\nError tracking allows developers to discover and view errors generated by their application. All errors generated by your application are displayed in the Error Tracking list in GitLab. 
Error tracking can help with availability and performance of your applications by detecting and resolving unexpected application conditions fast.\n\nGitLab can accept alerts from any monitoring source, including Prometheus, via a webhook receiver. As alerts come in, they are displayed in the GitLab Alerts list, from which you can manually manage them. Alerts can also automatically trigger the creation of incidents, ChatOps, and email messages to appropriate individuals or groups. All these capabilities streamline the alert resolution and management process.\n\nAs incidents are created, due to production problems, they appear in the GitLab Incidents list for incident management. You can manage one or more incidents, sort them, search them, assign them, set their statuses, and even see their SLA preset countdown timer. Moreover, you can create on-call schedules and rotations, escalation policies, and set up paging and notifications to handle incidents. In addition, you can link an incident to an alert so that when the incident is closed, its associated alert is automatically resolved. Incident timelines are another capability for executives and external viewers to see what happened during an incident, and which steps were taken for it to be resolved. All these capabilities streamline the incident management process so that they can be resolved as quickly as possible.\n\nAudit events track important events, including who performed the related action and when in GitLab. These events are displayed in the GitLab Audit Events list and provide, among others, the action that was taken on an object, who did it, and the date and time of its occurrence.\n\nAll the lists and dashboards mentioned above can help preempt out-of-compliance scenarios to avoid penalties as well as streamline audit processes. 
For your running applications, they generate the data and metrics that can be used in the GitLab Flow outer feedback loop to help improve and optimize your applications and lower the risk of unscheduled production outages.\n\n### Continuous improvement\nWhen applying GitLab Flow, you also have the opportunity to use the insight that GitLab provides in the form of end-to-end process metrics dashboards to continuously improve not just your application but also your software delivery performance. These dashboards and their metrics are auto-generated by GitLab and are always available.\n\n### The Value Stream Analytics dashboard\n\nYou can track and monitor your application development lifecycle through the Value Stream Analytics Dashboard, where you can check project or group statistics over time. This dashboard is customizable but you can get started quickly by creating a value stream using a GitLab-provided default template. The default dashboard displays metrics for each of the pre-defined stages of your value stream analytics, namely Issue, Plan, Code, Test, Review, and Staging, as well as a graph with the average time to completion for each. It also shows the value stream analytics key metrics: lead time, cycle time, new issues, commits, and deploys. You can use these metrics to find areas of improvement in the stages of your value stream.\n\n### DORA metrics dashboard\n\nTo view the performance metrics that measure the effectiveness of your organization’s development and delivery practices, GitLab provides the [DORA](https://about.gitlab.com/solutions/value-stream-management/dora/) (DevOps Research and Assessment) metrics dashboard, which displays four key metrics: Deployment Frequency, Lead Time for Changes, Time to Restore Service, and Change Failure Rate. Deployment Frequency measures how often your organization deploys code to production or releases it to end users. 
Lead Time for Changes measures how long it takes to go from code committed to code successfully running in production. Time to Restore Service measures the time needed to restore services to the level they were previously, in case of an incident. Finally, Change Failure Rate is the percentage of changes to production or released to users that resulted in a degraded service (for example, a change that caused a service impairment or outage) and subsequently required remediation (required a hotfix, rollback, patch). These four key metrics are outcomes of your current processes and give you the opportunity to improve the factors and capabilities that drive them.\n\n### Customize your dashboard\n\nAnother dashboard is the Value Streams Dashboard, which is a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for software development improvements. The metrics shown are the DORA metrics followed by the value stream analytics flow metrics and counts for critical and high vulnerabilities for the month to date, the two preceding months, and the past six months.\n\nGitLab Duo can also help in your continuous improvement efforts. For example, the **Value stream forecasting** capability takes historical data and uses data trends across your development lifecycle to predict the future behavior of your value stream metrics. 
You can use these predictive analyses in your optimization initiatives.\n\nAll these dashboards and the metrics they report on are part of the GitLab Flow outer feedback loop to help you lower the risk of unscheduled production outages and improve and optimize your applications and DevSecOps workflows.\n\n### AI impact analytics\nTo better understand the impact of the use of GitLab Duo (or AI) along the entire development life cycle, you can check the [AI Impact analytics](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/), from where you can see how the adoption of GitLab Duo Code Suggestions impacts other performance, quality and security metrics. You can visualize the last six months of AI adoption and its impact on other metrics, such as cycle time, lead time, deployment frequency, change failure rate, and critical vulnerabilities over time.\n\nAI impact analytics help to measure adoption, effectiveness and benefits that AI brings to teams and organizations and also to identify areas for improvement.\n\n## Why use GitLab Flow?\nGitLab Flow is a prescribed approach, practiced by our customers and users worldwide, that can provide the following benefits: \n- Higher productivity via the automation capabilities provided by GitLab and its single user interface and data model, all leveraged by GitLab Flow\n- Accurate insights into the end-to-end DevSecOps lifecycle to support continuous improvement\n- Built-in dashboards and metrics that can help you optimize your applications and DevSecOps processes\n- Higher code quality and improved reliability and availability of your applications\n- Better application security through built-in security scanners and capabilities\n- Compliance- and audit-readiness via built-in compliance features\n- Shorter cycle times that can help you increase deployment frequency\n- Continuous review enabled by the GitLab Flow inner feedback loop\n- The GitLab Flow inner feedback loop can 
help you optimize application updates leading to better code quality and higher reliability and availability of your applications\n- The GitLab Flow outer feedback loop can help you improve your applications as well as the development lifecycle itself\n- High levels of collaboration among stakeholders in your organization\n- Shifting security left to help find vulnerabilities in applications before they make it to production to avoid costly, unscheduled outages\n- Lower risk when deploying to production via the advanced deployment techniques and progressive delivery approaches supported by GitLab\n- AI-powered capabilities that span across the entire development lifecycle and can boost productivity, code quality, continuous improvement, security and compliance, and more\n- Support for cloud-native and non-cloud-native applications\n- Multi-cloud support for hybrid/multi-cloud applications\n- Shifting security left to help you find vulnerabilities in your applications before they make it to production so that you can avoid costly unscheduled outages\n\nHow can you get started with GitLab Flow? Leveraging GitLab Auto DevOps or parts of it is a good starting point for applying GitLab Flow principles to your application development lifecycle.\n\n## GitLab Flow and Auto DevOps\n\n![Auto DevOps - an instantiation of GitLab Flow](https://about.gitlab.com/images/blogimages/gitlab-flow-duo/ado-pipeline.png)\n\u003Ccenter>Auto DevOps - an instantiation of GitLab Flow\u003C/center>\u003Cp>\u003C/p>\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) applies GitLab Flow throughout all its stages and jobs. You can think of it as a good example for the instantiation of GitLab Flow.\n\nAuto DevOps is a collection of predefined, out-of-the-box CI/CD templates that auto-discover the source code you have. 
Based on best practices, these templates automatically detect, build, test, deploy, and monitor your applications.\n\nThe Auto DevOps pipeline shifts work left to find and prevent defects as early as possible in the software delivery process. The pipeline then deploys the application to staging for verification and then to production in an incremental/timed fashion.\n\nAuto DevOps gets you started quickly, increasing developer productivity, and it can be easily customized to your needs, with support for the most common programming frameworks and languages. Auto DevOps is modular, customizable, and extensible, which allows you to leverage pieces of it in your pipelines or apply all of it for your application.\n\n## Get started\n[Combine GitLab Flow and GitLab Duo today](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2Fblog%2F) to achieve significant improvements in end-to-end workflow efficiency that can lead to even higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. 
\n\nIf you'd like to see a workflow in action that combines GitLab Flow and GitLab Duo and how it can benefit you, watch the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CKrZ4_tKY4I?si=Kf6QsYFIzKkJZpJd\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,851,696],"2024-06-18",{"slug":2680,"featured":6,"template":700},"gitlab-flow-duo","content:en-us:blog:gitlab-flow-duo.yml","Gitlab Flow Duo","en-us/blog/gitlab-flow-duo.yml","en-us/blog/gitlab-flow-duo",{"_path":2686,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2687,"content":2693,"config":2702,"_id":2704,"_type":14,"title":2705,"_source":16,"_file":2706,"_stem":2707,"_extension":19},"/en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"title":2688,"description":2689,"ogTitle":2688,"ogDescription":2689,"noIndex":6,"ogImage":2690,"ogUrl":2691,"ogSiteName":685,"ogType":686,"canonicalUrls":2691,"schema":2692},"GitLab and HashiCorp streamline delivery workflows","Discover how to leverage CI/CD for your infrastructure scripts with Terraform and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670238/Blog/Hero%20Images/gitlab-terraform-pipelines.jpg","https://about.gitlab.com/blog/gitlab-hashicorp-terraform-vault-pt-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and HashiCorp: Providing application and infrastructure delivery workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kelly Hair\"},{\"@type\":\"Person\",\"name\":\"Anthony Davanzo\"}],\n        \"datePublished\": \"2019-09-17\"\n      }",{"title":2694,"description":2689,"authors":2695,"heroImage":2690,"date":2698,"body":2699,"category":1040,"tags":2700},"GitLab and HashiCorp: Providing application and infrastructure delivery workflows",[2696,2697],"Kelly Hair","Anthony 
Davanzo","2019-09-17","\nA growing number of teams are becoming more and more invested in continually improving the business through iterative development. Adopting the culture of DevOps isn’t necessarily confined to software development itself, but is equally applicable to ITOps, System Admins, and other infrastructure teams as well. Just as a proper CI/CD workflow is the foundation of today’s application delivery, a similar automated workflow is essential for managing the delivery of infrastructure as well.\n\nAs developers try to become more agile in building, packing, and testing their applications, having the right CI/CD tool that is flexible to other automation use cases is critical. GitLab has gone into great detail about their [flexible CI/CD capabilities here](https://docs.gitlab.com/ee/ci/introduction/index.html#how-gitlab-cicd-works). What’s sometimes overlooked is implementing the proper CI/CD process for the underlying infrastructure that these applications rely on. In addition to application delivery, organizations need to consider what their infrastructure delivery process looks like. GitLab and HashiCorp have partnered to create a multi-blog series on how to combine the application delivery workflow with the infrastructure delivery workflow. In this part we will discuss a high-level overview of the solutions that we will dive deeper into in Part 2.\n\n## Leveraging HashiCorp Terraform for CI/CD Pipelines\n\n[HashiCorp Terraform](https://www.terraform.io/) is an open source tool for provisioning infrastructure as code. Users define infrastructure in HashiCorp Configuration Language (HCL) configuration files, Terraform reads those configurations, offers a speculative plan of what it will create, and then users confirm and apply those changes. 
Terraform keeps track of what infrastructure is provisioned in a state file.\n\nThe recently announced Terraform Cloud application provides users with additional automation and collaboration capabilities on top of Terraform, such as remotely managing and version that state file, executing Terraform runs (plan/apply) remotely, and allowing teams to comment and collaborate on Terraform. By remotely managing state files, Terraform Cloud empowers teams to work more quickly and safely in parallel without concerns of losing the file or overwriting each other's changes. These features are especially helpful for users implementing CI/CD pipelines because they allow users to interact with Terraform via webhooks/API instead of having Terraform run on a local machine.\n\nMost users will store their configuration files in a VCS (Version Control System) like GitLab and connect that VCS to Terraform Cloud. That connection allows users to borrow best practices from software engineering to version and iterate on infrastructure as code, using VCS and Terraform Cloud as a provisioning pipeline for infrastructure. Terraform will automatically run a plan upon changes to configuration files in a VCS. This plan can be reviewed by the team for safety and accuracy in the Terraform UI, then it can be applied to provision the specified infrastructure. Terraform Cloud can also be configured to automatically apply those changes.\n\nTerraform Cloud also includes a Governance upgrade, which provides access to the [Sentinel](https://www.hashicorp.com/sentinel) policy as code framework.  This framework allows users to define fine-grain rules and policies for their infrastructure that are automatically enforced before that infrastructure is provisioned. 
This allows users to work with the speed and efficiency they want in their continuous integration/delivery pipelines, while still ensuring that best practices are being implemented.\n\n### Future iterations\n\nIt is also worth discussing current work in progress with GitLab and Vault. Vault from Hashicorp secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets that services depend on. In efforts to improve [Variables and secrets management in GitLab CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/816) we’re working with HashiCorp to provide a [first-class integration with Vault](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) sometime in the future.\n\n## Next steps\n\nAs a follow up, we will soon be posting a blog on the technical details of _how_ to build a Terraform pipeline in GitLab CI/CD.\n\nIn meantime, check out how [WagLabs reduced their release process from 40 minutes to just six](/blog/wag-labs-blog-post/), using Terraform and GitLab CI/CD!\n\n### About the authors\n\n_[Anthony Davanzo](https://www.linkedin.com/in/anthonydavanzo/) is the product marketing manager for Terraform Cloud at HashiCorp. In this role he focuses on bringing Terraform Cloud to market, hoping to drive adoption and spread awareness of the tool. 
His prior role as the technical product marketing manager for Terraform helps with deep domain knowledge and before HashiCorp, he was a product marketing manager at Cloudflare._\n\n_[Kelly Hair](/company/team/#khair1) is a solutions architect at GitLab._\n\nPhoto by [Saad Salim](https://unsplash.com/@saadx?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,830,721,232,2701],"frontend",{"slug":2703,"featured":6,"template":700},"gitlab-hashicorp-terraform-vault-pt-1","content:en-us:blog:gitlab-hashicorp-terraform-vault-pt-1.yml","Gitlab Hashicorp Terraform Vault Pt 1","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1.yml","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"_path":2709,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2710,"content":2716,"config":2721,"_id":2723,"_type":14,"title":2724,"_source":16,"_file":2725,"_stem":2726,"_extension":19},"/en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"title":2711,"description":2712,"ogTitle":2711,"ogDescription":2712,"noIndex":6,"ogImage":2713,"ogUrl":2714,"ogSiteName":685,"ogType":686,"canonicalUrls":2714,"schema":2715},"GitLab is now available as an AWS CodeStar Connections provider","AWS released native CodePipeline integration for GitLab projects and repos, helping to ensure a best-in-class experience when using GitLab and AWS together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098884/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098884409.jpg","https://about.gitlab.com/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is now available as an AWS CodeStar Connections provider\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2024-01-11\",\n      }",{"title":2711,"description":2712,"authors":2717,"heroImage":2713,"date":2718,"body":2719,"category":741,"tags":2720},[780],"2024-01-11","The GitLab DevSecOps Platform now integrates natively with many AWS services through AWS CodeStar Connections and AWS CodePipeline. This long-awaited integration was recently completed by the AWS CodeSuite service team for GitLab.com SaaS, GitLab Self-Managed, and GitLab Dedicated. AWS CodeStar Connections is a utility layer, which means other AWS services can enable native GitLab integration with less work.\n\nOnce created, CodeStar Connections objects can be used directly to integrate with many AWS services such as:\n- AWS CodePipeline,\n- Amazon CodeWhisperer Customization Capability,\n- AWS Service Catalog\n- AWS Glue\n\nWhen a CodeStar Connection is used to configure a GitLab CodePipeline configuration it can further support:\n- AWS CodeBuild\n- Amazon SageMaker MLOps Projects\n- AWS CodeDeploy\n\nGitLab and AWS have been working at ever deeper levels of technical and business integration to ensure that our co-customers have a best-in-class experience when using GitLab and AWS together.\n\n![AWS CodeStar integration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098900704.png)\n\nCheck out the complete list of AWS Services that are now directly accessible in the [GitLab AWS Integration Index documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html).\n\n![CodeStar - New Technology and Solutions for using GitLab and AWS Together ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/AWS_re_Invent_2023__New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together__4__aHR0cHM6_1750098900705.png)\n\n## Resources\n\n- GitLab [AWS Integration Index 
documentation](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html) is a one-stop location for these new integrations as well as existing integrations\n- AWS documentation for [setting up CodeStar Connections with GitLab.com SaaS](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n- AWS documentation for [setting up CodeStar Connections with self-managed GitLab](https://docs.aws.amazon.com/codepipeline/latest/userguide/connections-gitlab-managed.html)\n - AWS documentation for [configuring AWS CodePipeline integration](https://docs.gitlab.com/ee/user/project/integrations/aws_codepipeline.html)\n- [AWS announcement for GitLab CodePipeline Integration for GitLab SaaS](https://aws.amazon.com/about-aws/whats-new/2023/08/aws-codepipeline-supports-gitlab/) and [AWS announcement for GitLab Self-Managed](https://aws.amazon.com/about-aws/whats-new/2023/12/codepipeline-gitlab-self-managed/)\n\n![codestar-amazonpartnerlogo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098901/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098900705.png)\n",[1126,9,283,232],{"slug":2722,"featured":6,"template":700},"gitlab-is-now-available-as-an-aws-codestar-connections-provider","content:en-us:blog:gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","Gitlab Is Now Available As An Aws Codestar Connections Provider","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider.yml","en-us/blog/gitlab-is-now-available-as-an-aws-codestar-connections-provider",{"_path":2728,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2729,"content":2735,"config":2740,"_id":2742,"_type":14,"title":2743,"_source":16,"_file":2744,"_stem":2745,"_extension":19},"/en-us/blog/gitlab-journey-to-cicd",{"title":2730,"description":2731,"ogTitle":2730,"ogDescription":2731,"noIndex":6,"ogImage":2732,"ogUrl":2733,"ogSiteName":685,"ogType":686,"canonicalUrls":2733,"schema":2734},"GitLab's 
unconventional journey to CI/CD and Kubernetes","How the Delivery team at GitLab used our existing resources to overhaul our system to make way for CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678397/Blog/Hero%20Images/raphael-biscaldi-cicd.jpg","https://about.gitlab.com/blog/gitlab-journey-to-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's unconventional journey to CI/CD and Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-10-03\",\n      }",{"title":2730,"description":2731,"authors":2736,"heroImage":2732,"date":2737,"body":2738,"category":718,"tags":2739},[1245],"2019-10-03","\nEngineering teams are under pressure to provide value in the form of new features, all while minimizing [cycle time](/blog/reduce-cycle-time/). Oftentimes the instinct is to adopt modern tooling to make that happen. Continuous integration and delivery (CI/CD) is baked into GitLab, our single application for the DevOps lifecycle, and we are undergoing a major migration to Kubernetes to speed up our cycle time even more. But our journey to CI/CD and eventually Kubernetes has been unconventional, as the [Delivery team](/handbook/engineering/infrastructure/team/delivery/) elected to stress our current system as we step into [continuous delivery](/topics/continuous-delivery/) on GitLab.com before migrating entirely over to Kubernetes.\n\n## Releases before CI/CD\n\nThe wider GitLab community and GitLab team members [averaged 55 commits per day between Aug. 7 and Sept. 27, 2019](https://gitlab.com/gitlab-org/gitlab-ee/-/graphs/master/charts) as they continually iterate on our product to build new features for our customers. But before we adopted continuous delivery, we had to institute feature freeze periods beginning on the 7th of each month. 
During this period, engineers would shift their focus from building new features to fixing bugs in preparation for the upcoming release, which always happens on the 22nd.\n\n The use of a specific defined deadline encouraged behavior that ultimately caused developers to focus more on the due date and not around accomplishing the work.\n\n\"... developers would really play around the 7th because they would think ‘Oh, I have time, the 7th is in seven days,’ and then on the 6th at midnight they would panic merge things,\" said [Marin Jankovski](/company/team/#marin), engineering manager for the Delivery team. \"Because they know that if they missed this deadline they will have to wait for the next month, and if they get it in under this deadline they have a good two weeks to fix any problems that happen.\"\n\nSince the conception of GitLab.com, the feature freeze was used as a stabilization period, Marin explained.\n\nSoon though, the demand for new features from new users was pushing us to escalate our development velocity on GitLab.com. The stabilization period slowed our cycle time and created a significant drag in our turnover time for bug fixes, regression, and feature shipping for users both on GitLab.com and self-managed customers.\n\n“In some cases (the feature freezes) would even cause platform instability due to the fact that highest priority fixes couldn't find its way into customer hands quick enough,” said Marin. “By moving to CD, we can get both features and bug fixes alike into the hands of our users much quicker.”\n\nBefore the [Delivery team was created to manage GitLab.com's transition to continuous delivery](/handbook/engineering/infrastructure/team/delivery/#top-level-responsibilities) – and eventually Kubernetes – we depended upon a [release manager](/blog/release-manager-the-invisible-hero/), a rotating position among developers, to prepare the release. 
The [release process was iterated on over a five-year period](/community/release-managers/) as the release managers created a knowledge base and some automation to make the release process work.\n\nBut this method was inefficient as the timing behind the deployment process and release preparations was unpredictable, taking between half a day to multiple days due to the [accumulation of manual tasks in the process](https://gitlab.com/gitlab-org/release/docs/blob/master/general/tooling.md).\n\n“The release manager would get a set task list to go through, a deadline by which the tasks should be completed and they would have to repeat these steps over again until the release is ready, but also stable on GitLab.com,” explained Marin. At the highest level overview, the release manager had to:\n\n*   Manually sync the various repositories that GitLab consists of\n*   Ensure that the correct versions are set in the manually created Git branches\n*   Once the release is tagged, manually deploy to GitLab.com environments for both non-production and production\n*   Verify that everything is operational and manually publish the packages for self-managed users\n\nDuring his [presentation on this topic at GitLab Commit Brooklyn](https://youtu.be/lD-cYylwOLg), Marin shared the results of a 2018 survey which revealed that in the 14-day period before a release, the Delivery team spent 60% of their time babysitting deploys, and another 26% of their time on manual or semi-manual tasks release tasks, such as writing the monthly release post.\n\n![Task breakdown before CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/release-task-spread.jpg){: .medium.center}\nResults of a 2018 survey showing how the Delivery team spent their time two weeks before a release, before continuous delivery.\n{: .note.text-center}\n\n\"If you take a look at the whole thing, in 14 days, in two weeks, my team did nothing but sit on the computer and watch, well, paint dry, I guess,\" said 
Marin.\n\nBut by tackling 86% of the pie (60% deploys + 26% of the release manual tasks), the Delivery team could solve a few problems:\n\n1.  No release delays\n1.  Repeatable and faster deploys to enable no downtime\n1.  More time for our GitLab.com Kubernetes migration\n1.  More space to prepare the organization for continuous delivery\n\nAlthough CD is only on GitLab.com, our self-managed customers also benefit from our transition to CD. Now anything that isn't caught with CI testing is tested automatically and manually in environments before ever reaching GitLab.com. Anything that requires a fix that does reach GitLab.com can be fixed in a few hours, so the final release for self-managed customers won't include these particular issues.\n\n## Our unique approach to transitioning to CD and Kubernetes\n\nThe transition from using feature freezes to adopting CD on GitLab.com was inevitable as our features set grew, and a team of engineers, led by Marin, was formed to oversee this transition: “The Delivery team has been formed with the sole purpose of moving the company to a CD model for GitLab.com but at the same time for migrating GitLab.com to the Kubernetes platform to enable easier scaling and even faster turnaround times.”\n\nMany companies in GitLab’s position would have started this journey to CI/CD and Kubernetes by first integrating the new technologies into their workflow, and amending the development process as they go. We opted for a different approach.\n\nThe migration to Kubernetes requires a shift in both production systems and the engineering mindset, explained Marin. Kubernetes offers some features that teams can easily leverage without any extra investment. 
But in order to derive the greatest value from the free features Kubernetes offers, there ought to be some existing CI/CD process already in place.\n\nThe Delivery team recognized that in order to smooth the transition to Kubernetes for continuous delivery, our engineers must already be working with a CI/CD mindset – this includes a strong focus on quality assessments (QA) and stricter feature planning. So the Delivery team went with the [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions) and used our existing tools to build a CD system and reorganize the application infrastructure of GitLab.com instead of first adopting new tooling and technologies for CD.\n\n“The idea was simple,” said Marin. “We [leverage the tools at our disposal](https://gitlab.com/gitlab-org/release/docs/blob/master/general/deploy/auto-deploy.md), automate most of the manual tasks and ‘stress test’ the whole static system. If the static system can withstand the test, we move toward a more dynamic test.”\n\nThere were two key benefits to taking this approach:\n\n**First**, any weaknesses in our application were exposed and stabilized by automating with CI, so our application is stronger and less brittle, making a complete migration to Kubernetes more likely to be a success.\n\n**Second**, by shifting the engineering team to the CD mindset, we created a cultural shift among the engineers at GitLab who were accustomed to weekly deploys and waiting up to a day to see the impact of their merge.\n\n> “The definition of ‘done’ for developers has changed since the adoption of CI/CD,” said Marin.\n\nBefore CI/CD, a change was “done” once the review was completed. This was excluding deployments to various environments which took a considerable amount of time. 
Today, deployments are shipped within hours so there is no reason to not confirm that a change is working in testing and production environments.\n\nThe adoption of review apps on Kubernetes allow developers to run QA checks in virtually real time, and the use of [feature flags](/blog/feature-flags-continuous-delivery/) for progressive delivery also helps to accelerate development.\n\n“Since the first step in CD, developers are required to react to any automated QA but also carry out another level of manual verification in both non-production and production environments. Additionally, developers can have their changes running in production within a day compared to multiple days (and weeks).”\n\nEveryone can run QA checks on their code more frequently with CD. Because code changes are shipped around the clock with our CI/CD system, developers now operate an on-call rotation to help with any outstanding issues that are happening live on GitLab.com since the \"incubation\" time is much shorter.\n\n## Our new method\n\nSince the adoption of a CI/CD system, 90% of the [release process is automated](https://gitlab.com/gitlab-org/release/tasks/issues/885) using the [CI features of GitLab](/direction/verify/continuous_integration/). The remaining 10% requires human intervention due to coordination between various stakeholders.\n\n“We are slowly reducing those 10% as well with the goal of having only approvals needed to publish a release,” said Marin. 
[In the current iteration, the CI/CD process operates as follows](/direction/ops/):\n\n*   CI automatically looks for specific labels in merged MRs, applied by code reviewers and developers.\n*   CI automatically syncs all required repositories but also creates required Git branches, tags, as well as setting the correct versions of the release we want to ship.\n*   When the builds complete, packages are automatically deployed to non-production environments.\n*   Automated QA tasks are executed and, if passing, the deployment is rolled out to a small subset of users in production.\n*   In parallel, developers do another level of manual QA to ensure that new features are functioning as expected.\n*   If a high severity issue is discovered with manual verification, the deployments are stopped.\n*   When the above is completed, a member of the Delivery team will trigger a rollout to all users on GitLab.com.\n*   Self-managed release is then created from the last known working deployment running on GitLab.com.\n\nAs is true for any engineering team, scaling remains a challenge for us. But one of the biggest technical challenges is making sure there is enough QA coverage, which can be labor intensive for a product as big at GitLab.com. Also, making sure the monitoring and alerting is sufficient so the product isn’t operating solely based upon pre-set rules.\n\nThe second major challenge is the complexity of our GitLab.com system, and communicating the change in process across our engineering teams. 
“Dismantling more than five years of built-up process and habit is never easy,” said Marin.\n\n## The results\n\nGitLab is already benefitting from the shift to CI/CD in a number of ways.\n\nThe results of a new 2019 survey assessing how the Delivery team spends their time in the same 14-day period before the release shows that today, 82% of the team's time is freed up to work on other important tasks.\n\n![Task breakdown since CI/CD](https://about.gitlab.com/images/blogimages/journey-to-cicd/chart.jpg){: .medium.center}\nThe results of a 2019 survey measuring the same two weeks before the release shows the switch to CD has freed up valuable developer time.\n{: .note.text-center}\n\nBy automating manual tasks, the Delivery team was able to shift their focus toward changing the GitLab.com infrastructure to better support our development velocity and user traffic, as well as beginning the migration to Kubernetes.\n\n> \"And, did I mention, none of this is on Kubernetes. All of this is using our 'old' legacy system,\" said Marin to the GitLab Commit Brooklyn audience. \"But what happened with this is we bought ourselves time, so my team actually has time to work on the migration. But one of the biggest changes that happened was in the habits of the engineering organization.\"\n\nThe results since the shift have been significant. The Delivery team went from around seven deploys under the old system in May 2019 to 35 deploys on GitLab.com in August 2019, and is on track to surpass these numbers considerably now that they're shipping multiple deploys a day.\n\n“We have just completed the migration of our Registry service to Kubernetes and if you use [Container Registry on GitLab.com](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/70), all your requests are served from the Kubernetes platform,\" said Marin. \"Since GitLab is a multi-component system, we are continuing to isolate and migrate other services.”\n\nNew CI/CD features are included in each release. 
For example, in our 12.3 release, we [expanded the GitLab Container Registry to allow users to leverage CI/CD to build and push images/tags to their project](/releases/2019/09/22/gitlab-12-3-released/#remove-container-images-from-cicd) among other exciting new features.\n\n## Transitioning your system to continuous delivery?\n\nFor companies considering the transition to CD, Marin advised to start with what you’ve got.\n\n“From my perspective, waiting for migrating to a new platform is the real ‘enemy,’” said Marin. “Most systems can be altered in some ways to enable faster turnaround time without migrating to a fully new system. Speeding up the development/release cycle has multiplier return per engineer in that system and that frees up more time for migrations to new platforms, such as Kubernetes.”\n\nIf you’re curious about what’s up next, [check out this detailed summary of the exciting new CI/CD features](/blog/a-look-ahead-for-gitlab-cicd/) on track to be released in 12.4 and beyond.\n\n## Missed GitLab Commit Brooklyn?\n\nIf you missed Marin's presentation on the prequel to Kubernetes, watch the entire video below and catch us in Europe at [GitLab Commit London on October 9](/events/commit/)!\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lD-cYylwOLg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n[Cover Photo](https://unsplash.com/photos/rE3kbKmLmhE) by [Raphaël Biscaldi](https://unsplash.com/@les_photos_de_raph?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1064],{"slug":2741,"featured":6,"template":700},"gitlab-journey-to-cicd","content:en-us:blog:gitlab-journey-to-cicd.yml","Gitlab Journey To 
Cicd","en-us/blog/gitlab-journey-to-cicd.yml","en-us/blog/gitlab-journey-to-cicd",{"_path":2747,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2748,"content":2753,"config":2759,"_id":2761,"_type":14,"title":2762,"_source":16,"_file":2763,"_stem":2764,"_extension":19},"/en-us/blog/gitlab-mitre-attack-navigator",{"title":2749,"description":2750,"ogTitle":2749,"ogDescription":2750,"noIndex":6,"ogImage":1751,"ogUrl":2751,"ogSiteName":685,"ogType":686,"canonicalUrls":2751,"schema":2752},"Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques","This tutorial helps build and deploy a customized version of MITRE's ATT&CK Navigator using GitLab CI/CD and GitLab Pages.","https://about.gitlab.com/blog/gitlab-mitre-attack-navigator","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2023-08-09\",\n      }",{"title":2749,"description":2750,"authors":2754,"heroImage":1751,"date":2756,"body":2757,"category":697,"tags":2758},[2755],"Chris Moberly","2023-08-09","If you use [MITRE ATT&CK](https://attack.mitre.org/) for classifying\ncybersecurity incidents, you may want to visualize your coverage across a\nmatrix. This blog will show you how to do this automatically with GitLab by\ndeploying the [ATT&CK\nNavigator](https://github.com/mitre-attack/attack-navigator) web application\npre-populated with your own annotated matrices.\n\n\nWe make this easy by providing a fully working [example\nproject](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator)\nfor you to fork and customize. 
When you're finished, you'll have an\ninteractive visualization tool that displays your coverage of techniques\nacross the ATT&CK framework.\n\n\n![Image showing ATT&CK Navigator deployed from example\nproject](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-portal.png)\n\nATT&CK Navigator deployed from our example project\n\n{: .note.text-center}\n\n\n## About MITRE ATT&CK framework\n\nMITRE ATT&CK is a framework to classify and describe cybersecurity attacks\nbased on real-world observations. It provides a common language that can be\nused by different groups inside a security organization to collaborate on\nsecurity initiatives.\n\n\nFor example, when a company's Red Team emulates an attack based on the\ntechniques of a relevant adversary, they deliver a report that includes a\nlist of the specific technique IDs involved in the exercise. The team in\ncharge of detecting and responding to these attacks can use those IDs to\nresearch and implement improved defensive capabilities.\n\n\nBoth of these groups may want to track their coverage of offensive and\ndefensive capabilities across one of [MITRE's ATT&CK\nmatrices](https://attack.mitre.org/matrices/enterprise/). These matrices are\ncharts that visualize attack tactics and techniques relevant to specific\nindustries and technologies. For example, a company like GitLab may be\ninterested in understanding which techniques in the [Cloud\nmatrix](https://attack.mitre.org/matrices/enterprise/cloud/) we have\nemulated to test our detection and response capabilities.\n\n\nMITRE provides a free interactive web application, ATT&CK Navigator, to\nvisualize, annotate, and explore these matrices.\n\n\nAt GitLab, [our Red\nTeam](https://about.gitlab.com/handbook/security/threat-management/red-team/)\nproduces a new Navigator matrix at the completion of each operation. The\nmatrix highlights which attack techniques we've conducted. 
We find it useful\nto view all of these matrices in a single location, with the addition of a\ncombined matrix showing all of the techniques we have conducted across all\nhistorical operations. This helps us understand trends and identify\npotential gaps to cover in future operations.\n\n\n## Fork our example project to get started\n\nYou can fork our example project to get going on your own. First, you will\nneed an account on [GitLab.com](https://gitlab.com), or on a self-hosted\ninstance [with GitLab Pages\nenabled](https://docs.gitlab.com/ee/administration/pages/).\n\n\nNext, browse to [our example\nproject](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator)\nand click on the \"Forks\" button to create a new fork. Fill in the form shown\nbelow by choosing a name, location, and description for your copy of the\nproject. You may wish to change \"Visibility level\" to \"Private,\" which will\nrequire authentication to view your deployed application.\n\n\n![Example project showing where to click on the Forks\nbutton](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/fork-project.png)\n\n\nAfter that, take a look inside the `layers/` folder of your new project.\nEach file in this folder will be used to pre-populate a new tab in the\ndeployed web application where the listed attack techniques are annotated in\ngreen. We provide two example files, as well as an empty template file\ninside `templates/template.yml`.\n\n\nTemplates should list specific MITRE ATT&CK \"technique\" IDs as bullets below\ntheir corresponding MITRE ATT&CK tactic. You only need to include the\ntactics for which you will add techniques. 
Here is a short example of\ntechniques across four tactics:\n\n\n```yaml\n\nname: Operation 1\n\ndescription: Example of a Red Team operation - not real data\n\ntechniques:\n  initial-access:\n    - T1190\n  privilege-escalation:\n    - T1611\n    - T1055\n  lateral-movement:\n    - T1210\n    - T1021\n  exfiltration:\n    - T1041\n```\n\n\nYou can edit the example files or delete them and add new files of your own.\n\n\nThe next step is to build and deploy the web application. If you made any\nchanges to the `layers/` folder inside your default branch, this build\nprocess should have started automatically. If you just want to deploy the\napplication using the provided sample data, browse to \"Build\" -\\>\n\"Pipelines\" using the project's sidebar and click the \"Run pipeline\" button.\n\n\n![Image showing how to build and deploy app with run pipeline\nbutton](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/run-pipeline.png)\n\n\n\nYou can check to see if a pipeline has completed successfully by browsing to\n\"Build\" -\\> \"Jobs\" in the sidebar. There should be at least two jobs with a\nstatus of \"passed\" - one named `build_navigator` and one named `pages`.\n\n\nOnce both jobs are complete, browse to \"Deploy\" -\\> \"Pages\" in the project's\nsidebar. Check the \"Use unique domain\" box and click the \"Save changes\"\nbutton.\n\n\n![Image showing complete jobs with save changes\nbutton](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/pages-settings.png)\n\n\n\nThat's it! Your application should now be accessible using the URL provided\nunder \"Access pages\" on the same page.\n\n\n## How the example project works\n\nThere are three main components inside the example project:\n\n- The `gitlab-ci.yml` file. 
This defines what jobs run, and when they run.\nThe file contains rules to automatically run the jobs whenever you push\nchanges to the `layers/` folder inside the project's default branch, or\nwhenever you manually run a pipeline as described above.\n\n- The `create-layers.py` file. This Python script takes the custom YAML\nfiles you provide, and then generates individual and combined JSON files in\nthe format that ATT&CK Navigator expects. You can customize this file to\nchange the annotation color (`HIGHLIGHT_COLOR`) as well as some of the\napplication defaults (`JSON_BLOB`). In our internal version, for example, we\nremove some of the platform filters that don't apply to us.\n\n- The `build-navigator.sh` file. The shell script is responsible for\ndownloading the ATT&CK Navigator application, injecting the custom layers we\ncreate, and building the application. The output is a static website that we\nthen host using GitLab Pages.\n\n\n## More features of the ATT&CK Navigator app\n\nOnce deployed, the application is interactive. This blog focuses on using a\ncolored annotation to visualize coverage, but the [application is capable of\nmuch\nmore](https://github.com/mitre-attack/attack-navigator/blob/master/USAGE.md).\n\n\nHere are some features we use often:\n\n- In the \"layer controls\" bar up top, click on the button that looks like an\nasterisk surrounded by up and down arrows. The name is \"expand annotated\nsub-techniques.\" This will ensure that all techniques in your YAML file are\neasily viewable, as subtechniques may be hidden away when the matrix first\nloads.\n\n- If you want to hide all unannotated techniques, click anywhere in the\nmatrix and select \"select unannotated.\" Then, under \"technique controls,\"\nclick on the \"toggle state\" button. 
This will give you a much cleaner\nmatrix, showing only the specific techniques you've provided in your custom\nYAML files.\n\n\n![ATT&CK Navigator with unannotated techniques\nhidden](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-collapsed.png)\n\nATT&CK Navigator with unannotated techniques hidden\n\n{: .note.text-center}\n\n\nATT&CK is not a bingo card, and for most organizations the goal shouldn't be\nto have green squares across the board. Instead, you should use this to\nunderstand your own coverage and how it relates to the threats most relevant\nto you.\n\n\n## Share your feedback\n\nMITRE ATT&CK Navigator is a great tool for visualizing coverage of attack\ntechniques across a matrix. Using our example project, you can easily deploy\nan automated solution that builds this tool and pre-populates it with your\nown data.\n\n\nOn our Red Team, we love to find creative use cases for GitLab, and this is\none we use ourselves. If you find this useful, or if you have any ideas to\nimprove it, we would love to hear from you! 
Feel free to open an issue or a\nmerge request inside our [example\nproject](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator).\n",[697,827,917,9],{"slug":2760,"featured":6,"template":700},"gitlab-mitre-attack-navigator","content:en-us:blog:gitlab-mitre-attack-navigator.yml","Gitlab Mitre Attack Navigator","en-us/blog/gitlab-mitre-attack-navigator.yml","en-us/blog/gitlab-mitre-attack-navigator",{"_path":2766,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2767,"content":2773,"config":2780,"_id":2782,"_type":14,"title":2783,"_source":16,"_file":2784,"_stem":2785,"_extension":19},"/en-us/blog/gitlab-on-vmware-cloud-marketplace",{"title":2768,"description":2769,"ogTitle":2768,"ogDescription":2769,"noIndex":6,"ogImage":2770,"ogUrl":2771,"ogSiteName":685,"ogType":686,"canonicalUrls":2771,"schema":2772},"GitLab for Cloud Native Transformation on VMware Marketplace","Guest authors from VMware share how to accelerate your software delivery process in just a few clicks with Bitnami and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680841/Blog/Hero%20Images/bitnami-gitlab.png","https://about.gitlab.com/blog/gitlab-on-vmware-cloud-marketplace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Enterprise Edition now available for VMware Cloud Marketplace users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raquel Campuzano\"}],\n        \"datePublished\": \"2019-10-11\",\n      }",{"title":2774,"description":2769,"authors":2775,"heroImage":2770,"date":2777,"body":2778,"category":300,"tags":2779},"GitLab Enterprise Edition now available for VMware Cloud Marketplace users",[2776],"Raquel Campuzano","2019-10-11","\n\nHave you ever tried to choose from an extensive list of developer tools and wondered what you should do next? You’re not alone. 
There are hundreds of solutions to choose from, which can make it challenging to select the right solution and deploy.\n\nNow, GitLab and Bitnami have partnered to offer VMware users [GitLab](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in the VMware Cloud Marketplace. This version package is free, fully functional, and easy to [upgrade to an enterprise plan](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/license/).\n\n### Reduce costs and avoid security risks\nAs the industry leader in application packaging, Bitnami helped GitLab create an easy, click-to-deploy, open source solution. The GitLab Enterprise Edition (CORE) Virtual Appliance certified by Bitnami is an up-to-date and secure image that includes the latest versions of the application, its components, and the most recent security fixes. You can run GitLab with confidence; Bitnami’s automated pipeline and tools for building and testing applications ensure this application can run on any platform without issues. If you experience any problems deploying the solution, you can contact the [Bitnami Support team](https://community.bitnami.com/c/gitlab) with your questions.\n\n### Run on VMware infrastructure in a few clicks\nTo make GitLab available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), GitLab placed its trust in Bitnami’s expertise in packaging. GitLab users now have the ability to run the latest version on their VMware infrastructure in a few clicks.\n\n### Some of the key benefits of GitLab's marketplace listing:\n* GitLab includes a built-in container registry and Kubernetes integration, enabling you to quickly create a [continuous integration (CI)](/solutions/continuous-integration/) pipeline with Kubernetes. 
Learn more about [creating a CI/CD pipeline with GitLab and Kubernetes](https://docs.bitnami.com/tutorials/create-ci-cd-pipeline-gitlab-kubernetes/).\n* By deploying GitLab on a VMware cloud server, you can add a budget- and resource-checking stage to your pipeline. This allows you to implement best practices into your continuous deployment (CD) process and control the consumption and costs of your application deployments.\n* Premium features such as code quality and performance testing, static and dynamic application security testing, package dependency analysis, and automated tests for vulnerabilities enable you to identify and remediate issues and security breaches from development to monitoring stages. Learn more about [building misconfiguration and vulnerability checks into your CI/CD pipeline to achieve continuous security](https://thenewstack.io/how-continuous-security-can-solve-the-cloud-protection-conundrum/).\n\n### How do you get started? We’ll show you how\nIn order to upgrade your GitLab Core version to enjoy the Enterprise Edition features, take the following steps:\n\n1) First log into the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) and browse for the “GitLab Enterprise (CORE) Virtual Appliance” solution.\n\n2) Then click to view the details. 
Note: The GitLab Enterprise (CORE) Virtual Appliance is available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD).\n\n![GitLab is available in the VMware Cloud Marketplace in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD)](https://about.gitlab.com/images/blogimages/gitlabonvmware1.png){: .shadow.medium.center}\n\n3) To deploy the application both on VMC or VCD, you need to first subscribe to the image, as shown below:\n\n![To deploy the application both on VMC or VCD, you need to first subscribe, as shown below](https://about.gitlab.com/images/blogimages/subscribetovmwmarketplace.png){: .shadow.medium.center}\n\n4) Then, select the platform where you wish to deploy it, as shown below:\n\n![After subscribing, select the VMC or VCD platform where you wish to deploy](https://about.gitlab.com/images/blogimages/deploytovmwplatform.png){: .shadow.medium.center}\n\n5) Depending on the platform you select, you will be redirected to the vSphere Client or vCloud Director platform. Follow these instructions to launch a [GitLab Enterprise (CORE) Virtual Appliance using the vSphere Client](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/get-started-vmware-cloud/) or as a [vApp from VMware vCloud Director](https://docs.bitnami.com/vmware-marketplace/get-started-vcloud-director/).\n\n6) When you deploy the [GitLab Enterprise (CORE) Virtual Appliance certified by Bitnami](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), you get the free and fully functional [Core version of GitLab](/pricing/#self-managed), which is easily upgradable to Starter, Premium, or Ultimate. To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option. 
As you can see in the image below, you now have the option to either upload your `.gitlab-license` file or start a [free trial](/free-trial/).\n\nNote: If you start a free trial, you will be able to try all the paid features for the duration of the trial. After that time, your server will revert to Core features.\n{: .alert .alert-info}\n\n![To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option](https://about.gitlab.com/images/blogimages/vmwmarketplacefreetrial.png){: .shadow.medium.center}\n\n\n7) Once you activate your license, paid features will be enabled as shown below and you can start deploying with confidence.\n\n![Once you activate your license, paid features will be enabled](https://about.gitlab.com/images/blogimages/vmwpremiumfeatures.png){: .shadow.medium.center}\n\n## Conclusion\n\nWhat used to be a complex task is now just a few clicks, without compromising your budget and your security. Enjoy all the advantages of the GitLab in the VMware Cloud Marketplace and accelerate your software delivery process by leveraging the simplicity of the Bitnami experience.\n\n[Get started now](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9). If you have any questions, feel free to reach out to the Bitnami Support team!\n\n### About the guest author\n\nRaquel Campuzano is a Content Marketing Specialist at Bitnami, now part of VMware. She is in charge of managing the creation of technical content that allows developers to deploy awesome software everywhere. Raquel was part of the Bitnami team as technical writer. Her know-how creating tutorials, product documentation, and videos gave her the ability to identify in which stage of developer’s journey the user experience can be improved.\n\nPrevious to Bitnami, she led the communication and marketing strategy for Redborder (cybersecurity) and Oklan (network and hosting services). 
She is also a member of Ping a Programadoras, a non-profit organisation focused on promoting women’s inclusion in programming and software development.\n",[9,830,721,232],{"slug":2781,"featured":6,"template":700},"gitlab-on-vmware-cloud-marketplace","content:en-us:blog:gitlab-on-vmware-cloud-marketplace.yml","Gitlab On Vmware Cloud Marketplace","en-us/blog/gitlab-on-vmware-cloud-marketplace.yml","en-us/blog/gitlab-on-vmware-cloud-marketplace",{"_path":2787,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2788,"content":2794,"config":2802,"_id":2804,"_type":14,"title":2805,"_source":16,"_file":2806,"_stem":2807,"_extension":19},"/en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment",{"title":2789,"description":2790,"ogTitle":2789,"ogDescription":2790,"noIndex":6,"ogImage":2791,"ogUrl":2792,"ogSiteName":685,"ogType":686,"canonicalUrls":2792,"schema":2793},"GitLab Pages features review apps and multiple website deployment","GitLab Pages helps organizations reap the rewards of knowledge management, including better collaboration and accessibility. 
Learn how to use a new feature, Parallel Deployments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674550/Blog/Hero%20Images/blog-image-template-1800x945__1_.png","https://about.gitlab.com/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Pages features review apps and multiple website deployment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthew Macfarlane\"},{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2024-09-23\",\n      }",{"title":2789,"description":2790,"authors":2795,"heroImage":2791,"date":2798,"body":2799,"category":693,"tags":2800,"updatedDate":2801},[2796,2797],"Matthew Macfarlane","Janis Altherr","2024-09-23","[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) has long been\na popular choice for hosting static websites, allowing users to showcase\ntheir projects, blogs, and documentation directly from their repositories.\n\n\nBefore GitLab 17.4, you could only have a single version of your GitLab\nPages website. So you couldn’t preview your changes or have multiple\nversions of your website deployed simultaneously. Now, with a Premium or\nUltimate license, you can do both!\n\n\n### Introducing Parallel Deployments\n\n\nWith Parallel Deployments, users can now easily preview changes and manage\nmultiple environments for their GitLab Pages sites. This enhancement allows\nseamless experimentation with new ideas, enabling users to confidently test\nand refine their sites. By catching any issues early, users can ensure the\nlive site remains stable and polished, building on the already great\nfoundation of GitLab Pages.\n\n\n### Why Parallel Deployments is a game-changer\n\n\n1. 
**Version control made easy**\\\n   If your project involves software development or documentation that covers multiple versions (such as user guides for different software releases), Parallel Deployments makes it easy to manage. Or you can use the feature to localize your website for different languages.\n2. **Flexibility to experiment**\\\n   Want to try out a new design or feature? Parallel Deployments lets you experiment freely. You can create a separate version of your site to test new ideas without impacting the current site. This flexibility encourages creativity and continuous improvement.\n\n### How to add review apps to your GitLab Pages project\n\n\nTo add a review app to your GitLab Pages project, edit your `.gitlab-ci.yml`\nfile to create a deployment for each merge request (MR). Let’s assume you\nstart with a `.gitlab-ci.yml` file somewhat like this:\n\n\n```yaml\n\ncreate-pages:\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist # the name of the folder containing the pages files\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH # only run this job when there's a commit to the default branch\n```\n\n\nTo also run the pages pipeline when there’s an MR being opened or updated,\nwe can add another rule to `pages.rules`:\n\n\n```yaml\n\n- if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n\n```\n\n\nIf we only add this rule, however, each Pages job will always replace the\nmain deployment – each time an MR is opened! 
You likely don’t want that to\nhappen.\n\n\nTo provide each individual deployment with its own URL, we’ve introduced the\nnew `pages.path_prefix` property.\n\n\nA Pages deployment with this configuration...\n\n\n```yaml\n\ncreate-pages:\n  script:\n    - ...\n  pages:\n    ...\n    path_prefix: my-review-app\n```\n\n\n...will be available at\n`https://my-pages-app-7fe824.gitlab.io/my-review-app`, or, with unique\ndomains disabled, `https://my-group.gitlab.io/my-project/my-review-app`.\n\n\nBut there’s no need to hardcode the path_prefix. You can dynamically\ngenerate it using CI variables. That’s particularly useful for review apps –\nto create a path for each MR, use the `CI_MERGE_REQUEST_IID variable`:\n\n\n```yaml\n\ncreate-pages:\n  script:\n    - ...\n  pages:\n    ...\n    path_prefix: mr-$CI_MERGE_REQUEST_IID\n```\n\n\nAn MR with the ID 114 would then automatically create a deployment at\n`https://my-pages-app-7fe824.gitlab.io/mr-114`.\n\n\nWith those concepts at hand, we’d like our pipeline to dynamically create\neither a main deployment for the default branch, or a path_prefixed-review\napp for MR events.\n\n\nFirst, let’s add a `create-pages-review-app` job to our pipeline config:\n\n\n```yaml\n\ncreate-pages-deployment:\n  # This job will create a pages deployment without path_prefix\n  # when there is a commit to the default branch\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist \n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\ncreate-pages-review-app:\n  # This job will create a pages deployment with a path_prefix\n  # when there a merge request is created or updated.\n  stage: deploy\n  script:\n    - npm run build\n  pages:\n    publish: dist \n    path_prefix: 'mr-$CI_MERGE_REQUEST_IID' # Prefix with the mr-\u003Ciid>, like `mr-123`\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n```\n\n\nNow you’re creating a deployment both when pushing to the default branch,\nand prefixed parallel 
deployments when creating or updating MRs!\n\n\nFor the best experience, add the URL to the environment job property. This\nwill add a link to the review app to the MR page:\n\n\n```yaml\n\ncreate-pages-deployment:\n  # This job will create a pages deployment without path_prefix\n  # when there is a commit to the default branch\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist \n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\ncreate-pages-review-app:\n  # This job will create a pages deployment with a path_prefix\n  # when there a merge request is created or updated.\n  stage: deploy\n  script:\n    - npm run build\n  pages:\n    publish: dist \n    path_prefix: 'mr-$CI_MERGE_REQUEST_IID' # Prefix with the mr-\u003Ciid>, like `mr-123`\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  environment:\n    name: \"Pages Review MR ${CI_MERGE_REQUEST_IID}\"\n    url: $CI_PAGES_URL\n```\n\n\nCongratulations, you’ve now set up MR review apps for your Pages site.\n\n\n## How to deploy documentation for different versions of your product\n\n\nThe Parallel Deployments feature is also a useful tool if you maintain the\ndocumentation of multiple versions of your software simultaneously.\n\n\nThe below CI config will not only create a pages deployment when there is a\ncommit to the default branch, but also for any commit to branches named\n`v1`, `v2`, or `v3`.\n\n\n```yaml\n\ncreate-pages:\n  stage: deploy\n  script:\n    - ...\n  variables:\n    PAGES_PREFIX: \"$CI_COMMIT_BRANCH\" # Use the branch name by default\n  pages:\n    path_prefix: \"$PAGES_PREFIX\" # use whatever value is set in the variable\n  environment:\n    name: \"Pages ${PAGES_PREFIX}\"\n    url: $CI_PAGES_URL\n  artifacts:\n    paths:\n    - public\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      variables:\n        PAGES_PREFIX: '' # No prefix\n    - if: $CI_COMMIT_BRANCH == 'v1'\n    - if: $CI_COMMIT_BRANCH == 'v2'\n    - if: 
$CI_COMMIT_BRANCH == 'v3'\n```\n\n\nBy using the `$CI_COMMIT_BRANCH` variable as the path_prefix value, each of\nthese branches will deploy their documentation to their own sub-path of your\nwebsite:\n\n\n- The branch named v1 has its docs published to \u003Cmy-domain>/v1.\n\n- The branch named v2 has its docs published to \u003Cmy-domain>/v2.\n\n- The branch named v3 has its docs published to \u003Cmy-domain>/v3.\n\n\nA new commit to one of these branches will then trigger a new deployment to\nits respective path, keeping the documentation of multiple versions up to\ndate.\n\n\nThe Parallel Deployments feature is a significant upgrade to GitLab Pages,\noffering a more flexible and efficient way to manage your knowledge. Whether\nyou're working on a small project or a large-scale site with multiple\nversions, this new capability will make your workflow smoother and more\nefficient\n\n\n> Visit our [Parallel Deployments\ndocumentation](https://docs.gitlab.com/ee/user/project/pages/#create-multiple-deployments)\nto get started today!\n\n\n### Feedback\n\n\nShare your ideas and other comments in our [feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/482040)!\n",[999,9,696,695,693,917],"2025-04-09",{"slug":2803,"featured":6,"template":700},"gitlab-pages-features-review-apps-and-multiple-website-deployment","content:en-us:blog:gitlab-pages-features-review-apps-and-multiple-website-deployment.yml","Gitlab Pages Features Review Apps And Multiple Website 
Deployment","en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment.yml","en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment",{"_path":2809,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2810,"content":2816,"config":2822,"_id":2824,"_type":14,"title":2825,"_source":16,"_file":2826,"_stem":2827,"_extension":19},"/en-us/blog/gitlab-pages-for-covid",{"title":2811,"description":2812,"ogTitle":2811,"ogDescription":2812,"noIndex":6,"ogImage":2813,"ogUrl":2814,"ogSiteName":685,"ogType":686,"canonicalUrls":2814,"schema":2815},"Using GitLab Pages to Report Local COVID-19 Rates","How I used GitLab pages to publish up-to-date local infection rates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681476/Blog/Hero%20Images/thisisengineering-raeng-0jTZTMyGym8-unsplash.jpg","https://about.gitlab.com/blog/gitlab-pages-for-covid","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using GitLab Pages to Report Local COVID-19 Rates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Nohr\"}],\n        \"datePublished\": \"2020-08-06\",\n      }",{"title":2811,"description":2812,"authors":2817,"heroImage":2813,"date":2819,"body":2820,"category":978,"tags":2821},[2818],"Matt Nohr","2020-08-06","{::options parse_block_html=\"true\" /}\n\n\n\n\n## Finding Local COVID Rates\n\n\nI live in the U.S. state of Minnesota. Recently the state government\nprovided recommendations for how and when to open schools in the fall. The\nguidance was based on the infection rates of the COVID-19 disease. In simple\nterms, the higher the rates, the less in-person the school should be. The\nactual calculation I needed was:  \n\n\n```\n\ntotal number of cases in your area over the past 2 weeks per 10,000\nresidents\n\n````\n\n\nI have three kids in school, so when I heard this recommendation I went to\nfind out this value for my area. 
It turned out to be a difficult statistic\nto find. Along with the announcement my state government released a set of\ndata, but it was about three weeks behind the current rates. I found\ndifferent sets of data available, but they either reported the daily case\nrate or a total count of infections, not this very specific calculation.\n\n\nSee Also:\n\n- [GitLab's Handbook on COVID-19\nbenefits](https://about.gitlab.com/handbook/total-rewards/benefits/covid-19/)\n\n- [How an analytics software startup took aim at\nCOVID-19](https://about.gitlab.com/blog/startup-covid-tracking/)\n\n\n## GitLab Pages to the Rescue\n\n\nI started by manually calculating the values with the data that I could\nfind. This worked, but every time there were updated statistics, I had to go\nback and recalculate the value. I wanted a way to have this information\navailable for me and others with the up-to-date information whenever I\nlooked at it.\n\n\nMy process and output quickly evolved:\n\n\n1. I decided I would just write a script to download the data and do the\ncalculation for me\n\n1. Once I had this working I thought the next step would be to automatically\ngraph the results \n\n1. Then I thought I could publish the graphs on a website \n\n1. 
If I was going to publish them, I thought the best thing to do to get\nthis done quickly would be to use [GitLab\nPages](https://docs.gitlab.com/ee/user/project/pages/).\n\n\nThe result is a simple static website:\n[https://mattnohr.gitlab.io/covid-county/](https://mattnohr.gitlab.io/covid-county/)\n\n\n![Example Chart from\nwebsite](https://about.gitlab.com/images/blogimages/gitlab-pages-for-covid/output-chart.png){:\n.shadow.center}\n\n\n## How It Works\n\n\nThe basic flow for my new “system” is:\n\n\n```plantuml\n\n(*) --> \"Download data\"\n\n--> \"Calculate the rates\"\n\n--> \"Create a new .csv file with daily calculated values\"\n\n--> \"Publish .csv file to GitLab pages\"\n\n--> \"Use GitLab pages to serve static website that reads .csv\"\n\n--> (*)\n\n```\n\n\nThe first few steps are done with a simple [Kotlin](https://kotlinlang.org/)\nscript that is run using the [Gradle build tool](https://gradle.org/). I\nused [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipelines to run a job\nto do that automatically. You can find an [example gradle .gitlab-ci.yml\nfile\nhere](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Gradle.gitlab-ci.yml).\nThe basics of this step for me look like:\n\n\n```yml\n\nbuild:\n  stage: build\n  script: gradle --build-cache run\n```\n\n\nThe next step was to get it published to GitLab pages. That also used a\nGitLab CI/CD job. It simply needed to move the .csv results out of the build\ndirectory into the “public” directory that is used to host GitLab pages. 
\n\n\n```yml\n\npages:\n  stage: deploy\n  dependencies:\n    - build\n  script:\n    - mv build/data.csv public/\n```\n\n\nThe actual static webpage uses [d3.js](https://d3js.org/) that is able to\nread the data from a .csv file and graph it.\n\n\nMy GitLab project can be found here:\n[https://gitlab.com/mattnohr/covid-county](https://gitlab.com/mattnohr/covid-county)\n\n\n## Running on a Schedule\n\n\nOnce I had the system up and running with GitLab CI, I was able to use\n[GitLab Pipeline\nSchedules](https://docs.gitlab.com/ee/ci/pipelines/schedules.html) to run\nthe script a few times a day to get updated data. Now I do not have to worry\nabout when the data is updated, I can just review my GitLab Pages site to\nsee the latest values.\n\n\nPipeline Scheudles let you easily schedule pipelines daily, weekly, or\nmonthly. Since I wanted this to run multiple times a day, I used a cron\nschedule to run the pipeline 4 times a day on weekdays:\n\n\n```\n\n0 8,12,16,20 * * 1-5\n\n```\n\n\n## Result\n\n\nNow I have a [simple website](https://mattnohr.gitlab.io/covid-county/) that\nhas the most up-to-date calculations for this specific value for my local\narea. 
Now I just need to wait for our local school board to make a final\ndecision on how schools will look!\n\n\n\u003C!-- image: image-url -->\n\nCover image by [@ThisisEngineering\nRAEng](https://unsplash.com/@thisisengineering) on\n[Unsplash](https://unsplash.com/photos/0jTZTMyGym8)\n\n{: .note}\n",[549,999,874,9],{"slug":2823,"featured":6,"template":700},"gitlab-pages-for-covid","content:en-us:blog:gitlab-pages-for-covid.yml","Gitlab Pages For Covid","en-us/blog/gitlab-pages-for-covid.yml","en-us/blog/gitlab-pages-for-covid",{"_path":2829,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2830,"content":2836,"config":2842,"_id":2844,"_type":14,"title":2845,"_source":16,"_file":2846,"_stem":2847,"_extension":19},"/en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"title":2831,"description":2832,"ogTitle":2831,"ogDescription":2832,"noIndex":6,"ogImage":2833,"ogUrl":2834,"ogSiteName":685,"ogType":686,"canonicalUrls":2834,"schema":2835},"GitLab provides small business with a professional, mature DevOps platform","Blonk had a small team but a big need for professional software development. Here's how GitLab helped.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668272/Blog/Hero%20Images/blonklogo.png","https://about.gitlab.com/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab provides small business with a professional, mature DevOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Esther Shein\"}],\n        \"datePublished\": \"2022-05-19\",\n      }",{"title":2831,"description":2832,"authors":2837,"heroImage":2833,"date":2839,"body":2840,"category":741,"tags":2841},[2838],"Esther Shein","2022-05-19","\nBlonk is an international leader in the field of environmental and sustainability research in the agri-food sector. 
But as a small business without a QA team or a security team, the challenge was figuring out how to deliver professional software with only a few developers.\n\n[Blonk](https://blonksustainability.nl) used an external company to help set up what Bart Durlinger, product development manager, and software devevloper Pieter van de Vijver envisioned as its platform at the time. “They set up an environment on Amazon, a separate built server, a separate repository, and then some scripts in between to link it all together,” Durlinger recalls. “But when we decided to take more control, that was just too complex. We had too many different parts in many different places. We didn't have the capacity at the time to really oversee how this should all work together.”\n\nThat's when the Blonk team started looking for platforms that offered a more integrated approach, with project management, CI/CD, repository, and version control features all in one place.\n\n## Mature, with a modern vision of software development\n\nBlonk turned to GitLab after finding that the platform “had a lot of the things you need to have a professional delivery pipeline integrated into one solution,\" says Durlinger. At the time, the consultancy was using GitHub, which was more expensive, he says.\n\nWhen Blonk started with GitLab, the platform was free, which was a big factor in its selection, van de Vijver says. “But it was also an up-and-coming startup with a vision of that CI/CD integration built into how you envisioned the whole service itself,\" he says. 
“GitHub was more of a repository that might provide you with those things, but it required more manual setup.”\n \nBlonk liked that GitLab was a mature and stable solution “but still new enough to have a vision of how software is approached nowadays with easy setup and an integrated pipeline by default, and useful branching strategies by which you could support a multi-level, multi-stage deployment process easily,\" Van de Vijver says.\n\nAt the time Van de Vijver was the only one at Blonk with a background as a software developer, and another bonus was his familiarity with all the tools in GitLab. “By using GitLab, we could hit the ground running, and keep the scale small. You don't have to worry about all kinds of CI/CD operations and integrations and the configuration of that but use it just out of the box,” he says.\n\n## How Blonk is utilizing GitLab today\n\nCurrently, Blonk has 38 GitLab premium licenses, about half of which are used by software developers. The rest are used by data scientists, consultants, project managers, and others, so there are different ways the platform is utilized within the company; that also means there are different levels of software literacy but that hasn’t been an issue. The software development team has been onboarding very junior developers over the past couple of months, and “never have I had questions of how to do stuff in GitLab, because the platform is very intuitive,” Durlinger says.\n\nThe software development team has been integrated further into the core business, which also fits nicely with GitLab’s services, including the milestones Blonk uses as well as its repositories and project management strategies. “Also data scientists and methodology developers are now using GitLab projects for the project planning sometimes,” Durlinger notes.\n\nGitLab provided Blonk with a professional software environment for their developers. 
GitLab also lets the team use pre-built Docker images and a private Python package repository in their CI/CD pipelines, which means faster build times and easy integration, according to Durlinger. “That's a huge change because then we can distribute the work over multiple teams that can work independently on projects,” he says.\n\nThe platform’s automation features have also improved operational efficiency. “We don't need to communicate with external parties, or do any manual steps if we make code changes. We now are in control of managing our software and infrastructure deployment via CDK and gitlab-ci scripts, which makes it fully automated,” Durlinger explains.\n\nIn the project planning stage, Blonk is using GitLab issue templates to define issues, “and that also has really improved the quality of how we define issues to start with,\" Durlinger adds. Blonk has reaped huge benefits from the Agile capabilities of GitLab to plan, manage and monitor their workflows.  \n\nBlonk now has improved transparency and collaboration amongst their teams, and they are using the GitLab Wiki to build an internal knowledge base to optimize productivity and accelerate new developer onboarding.\n\nGitLab has supported the scaling of the developer team from 2 to now 16 developers, going from a single team to 3 software teams and a data science team, all using the One DevOps Platform bringing a much needed single DevOps workflow. Blonk is using the package registry, Docker integration with GitLab, and each team now deploys microservices on AWS. Teams are facilitated via GitLab with enhanced communication and a robust feedback loop.  \n\nProbably the biggest selling point of the platform is that it offers an integrated environment of all solutions related to code management and deployment – from container services to package registry services – everything Blonk wants to use in a pipeline and be able to manage privately, according to Durlinger. 
The fact that Blonk no longer has to use multiple tools in an ad hoc manner is another benefit. \n\n“What’s really nice is that our non-code artifacts live together with the code,\" Durlinger notes. “Our designs, methodology documents, and prototypes developed by data scientists can all be part of our Gitlab projects. This has improved workflow throughout the organization,” he says.\n\n## Looking ahead\n\n“As Blonk continues its business transformation, GitLab is helping the company maintain its reputation as a reliable and honest company,” Durlinger says. GitLab has added value to their employer brand and makes them more attractive to new developers to join: “It demonstrates that we have a professional environment for software engineers.”\n\nBlonk’s goal is to improve sustainability performance analysis, and ensure that the tools they are building have the same integrity and quality. “GitLab enables us to do this by having a professional project creation pipeline in place,\" Durlinger says.\n",[721,720,9],{"slug":2843,"featured":6,"template":700},"gitlab-provides-small-business-with-a-professional-mature-devops-platform","content:en-us:blog:gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","Gitlab Provides Small Business With A Professional Mature Devops Platform","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform.yml","en-us/blog/gitlab-provides-small-business-with-a-professional-mature-devops-platform",{"_path":2849,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2850,"content":2856,"config":2862,"_id":2864,"_type":14,"title":2865,"_source":16,"_file":2866,"_stem":2867,"_extension":19},"/en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"title":2851,"description":2852,"ogTitle":2851,"ogDescription":2852,"noIndex":6,"ogImage":2853,"ogUrl":2854,"ogSiteName":685,"ogType":686,"canonicalUrls":2854,"schema":2855},"Announcing GitLab Serverless deploying to Cloud Run for Anthos","Discover how 
we're making it easier to deploy serverless workloads on-premise with Anthos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666851/Blog/Hero%20Images/gitlab-serverless-blog.png","https://about.gitlab.com/blog/gitlab-serverless-with-cloudrun-for-anthos","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing GitLab Serverless deploying to Cloud Run for Anthos\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-11-19\",\n      }",{"title":2851,"description":2852,"authors":2857,"heroImage":2853,"date":1896,"body":2859,"category":783,"tags":2860},[2858],"Mayank Tahilramani","\nThis week at Google Cloud Next ’19 UK, Google Cloud grew its Anthos product portfolio with the addition of Cloud Run for Anthos running on-prem. I’m excited to share that GitLab has been collaborating with Google Cloud product teams to support this launch and enable customers with CI/CD and [GitLab Serverless](/topics/serverless/) capabilities for quicker and easier adoption of serverless solutions. In the spirit of our partnership, our support for [Cloud Run for Anthos](https://cloud.google.com/run) is a continuation of our collaboration [announced earlier this year at Google Cloud Next ’19 in San Francisco](/blog/running-a-consistent-serverless-platform/), where we showed how you can deploy a serverless function to Cloud Run using the same developer workflow you’re already familiar with in GitLab. Now, we’re looking to bring that same UX and workflow consistency to Cloud Run deployments on Anthos running on-premise. Overall, together, GitLab and Google Cloud are aiming to lower the barrier of adoption for customers looking to architect scalable, cloud native solutions. \n\nHowever, when discussing cloud native, oftentimes ‘public cloud infrastructure’ comes to mind. 
But when I think of cloud native, I think of the various, modern ways of architecting scalable solutions, backed by managed services to make operations more convenient. Until very recently, infrastructure-centric managed services like Google Kubernetes Engine (GKE), Cloud Run, StackDriver, etc. have been traditionally associated with workloads running within cloud data centers. Given the recent announcements of [Google Cloud Anthos](https://cloud.google.com/blog/products/serverless/knative-based-cloud-run-services-are-ga), Google is clearly broadening the boundaries of cloud native across hybrid and heterogeneous environments, including customer data centers. As the infrastructure landscape diversifies, as application development intertwines with abstraction layers of managed services, and as workload flexibility becomes inherent with microservice containerization, the one thing you can rely on staying consistent is GitLab’s developer workflow to supplement all the above. In the context of all things [serverless](/topics/serverless/), let's take a closer look at what’s available today, what we’re still working on, and what that means for our users.\n\n## What’s available today\n\nGitLab serves as a single application for all of [DevOps](/topics/devops/), which includes building, deploying, and managing serverless applications. GitLab serverless enables developers to focus on writing application code without having to worry about Kubernetes or Knative YAML configuration. GitLab provides templates allowing developers to easily build and deploy Knative services that can be deployed to Cloud Run. Here is a [quick video walkthrough on the anatomy of a serverless project hosted in GitLab and deployed to Knative](https://youtu.be/IIM8JWhAbNk?t=210). With Google, you have a few options on how to leverage Cloud Run as a deployment target for GitLab CI/CD. As of this week, you can run Cloud Run in three different flavors: \n\n1. 
**Cloud Run**: This is a fully managed cloud service powered by Knative for serverless apps. GitLab supports deploying to Cloud Run and the full CI/CD workflow to leverage GitLab Runners to build and test functions. GitLab takes in the [`serverless.yml`](https://docs.gitlab.com/ee/update/removals.html) file within the root of your source code repository to define and deploy to Cloud Run.  \n\n2. **Cloud Run for Anthos running on Google Cloud**: This is a managed deployment of Knative on Anthos GKE clusters running on Google Cloud Platform. This enables you to install a managed Cloud Run deployment on top of your own Kubernetes cluster. Similar to above, GitLab also supports deploying to Cloud Run via the full CI/CD workflow, but as of right now, the highest version of Knative supported by GitLab is 0.7. Latest version support for Knative is coming in [GitLab 12.6](/releases/) on Dec. 22, 2019.  \n\n3. **Cloud Run for Anthos running on-premise**: Similar to above, this flavor of Cloud Run enables users to run a managed Cloud Run deployment on top of Anthos GKE On-Prem in your own data center. Currently, Knative v.0.9 is deployed in GKE-OP clusters. GitLab is soon to release support for Knative v0.9 and users can track the progress of this work in [this open issue](https://gitlab.com/gitlab-org/gitlabktl/issues/55) today. If you like what we’re working on, stop by and give us a thumbs up for feedback. So far, internal testing has been very positive and we look forward to formally supporting Cloud Run for Anthos running on-premise in the coming months/releases. The user experience will be almost identical to the prior two use cases listed above as you would expect.\n\n## Where to get started\n\nIf you’re interested in getting started with some sample code, check out our [documentation](https://docs.gitlab.com/ee/update/removals.html) and [sample app project](https://gitlab.com/knative-examples/functions) for reference. 
Additionally, [here is a walkthrough of deploying a demo app to Cloud Run from GitLab](https://youtu.be/lb_bRRAgEyc?t=1103). If you’re looking to get started with Serverless on Google Cloud Platform, [sign up for GitLab.com here](https://gitlab.com/users/sign_up) and then [sign up for $200 additional free GCP credits](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC).\n",[9,830,721,232,2861],"careers",{"slug":2863,"featured":6,"template":700},"gitlab-serverless-with-cloudrun-for-anthos","content:en-us:blog:gitlab-serverless-with-cloudrun-for-anthos.yml","Gitlab Serverless With Cloudrun For Anthos","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos.yml","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"_path":2869,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2870,"content":2873,"config":2880,"_id":2882,"_type":14,"title":2883,"_source":16,"_file":2884,"_stem":2885,"_extension":19},"/en-us/blog/gitlab-ultimate-for-ibm-z-modern-devsecops-for-mainframes",{"noIndex":6,"description":2871,"title":2872},"A new offering from GitLab and IBM bridges mainframe and cloud-native development with seamless integration, CI/CD runner support, end-to-end visibility, and cost efficiency. ","GitLab Ultimate for IBM Z: Modern DevSecOps for mainframes",{"title":2872,"description":2871,"body":2874,"category":693,"tags":2875,"authors":2876,"heroImage":2878,"date":2879},"GitLab and IBM have partnered to solve a fundamental disconnect in enterprise development: enabling mainframe developers to work with the same modern tools, workflows, and collaboration features as their distributed counterparts. GitLab Ultimate for IBM Z, a GitLab-certified, integrated DevSecOps solution tailored for the mainframe environment, does just that — allowing organizations to modernize their mainframe development workflows by facilitating a seamless migration from outdated legacy library managers. 
With CI/CD pipelines running natively on IBM z/OS, customers experience accelerated innovation and reduced operational costs.\n\n## Challenges of today's mainframe development\n\nEnterprise organizations that use IBM Z systems for mission-critical workloads face challenges that conventional DevSecOps tools aren’t equipped to address. Cloud-native teams benefit from modern [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipelines, collaborative development, and automated testing. In contrast, mainframe teams are often left behind — stuck with outdated tools that lead to costly inefficiencies and operational silos.\n\nTeams often resort to workarounds, such as SSH connections and manual file transfers, which create security vulnerabilities and audit difficulties. When compliance requirements are stringent, these improvised solutions become unacceptable risks. Meanwhile, organizations maintain expensive parallel toolchains, with legacy mainframe development tools carrying premium licensing costs while delivering limited functionality compared to modern alternatives.\n\nThis fragmentation creates two problems: slower delivery cycles and difficulty attracting developers who expect modern development experiences.\n\n> **\"GitLab Ultimate for IBM Z represents an important step in addressing a long-standing industry challenge. IDC research shows that mainframe developers often work with legacy tooling that contributes to delivery inefficiencies and makes it harder to attract new talent. With this offering, modern DevSecOps capabilities and unified workflows are brought directly to the mainframe. 
This empowers developers to work more collaboratively and efficiently, while helping organizations accelerate innovation and integrate mainframe development into broader digital transformation strategies.\"** - Katie Norton, Research Manager, DevSecOps and Software Supply Chain Security at IDC\n\n## Unified development environments\n\nTrue modernization means more than just updating mainframe development. It means creating a unified platform where mainframe, cloud-native, web, and mobile development teams collaborate seamlessly.\n\nGitLab Ultimate for IBM Z enables developers to use consistent workflows whether they're deploying to z/OS, cloud, or on-premises infrastructure — knowledge transfers between teams instead of staying siloed. Organizations can modernize incrementally without business disruption, as legacy systems continue operating while teams adopt modern practices at their own pace.\n\nAs organizations pursue hybrid cloud strategies, GitLab provides the foundation for applications that span mainframe and cloud-native environments.\n\n## What is GitLab Ultimate for IBM Z?\n\nGitLab Ultimate for IBM Z delivers native z/OS Runner support, enabling seamless CI/CD pipeline execution directly on your mainframe infrastructure. 
This GitLab-certified solution helps eliminate the need for complex workarounds while maintaining the security and reliability your enterprise applications demand.\n\nThe combination of GitLab's comprehensive DevSecOps platform with IBM's deep mainframe expertise creates something unique in the market: a certified solution that provides a true bridge between enterprise legacy systems and cloud-native innovation.\n\n## GitLab Ultimate for IBM Z capabilities\n\nGitLab Ultimate for IBM Z provides enterprise teams with the tools they need to modernize mainframe development while preserving critical business systems.\n\n**Native z/OS Runner support** helps eliminate security risks and scalability bottlenecks associated with remote connections, while accelerating delivery through CI/CD pipelines that execute directly where your mainframe code resides.\n\n**Unified Source Code Management** modernizes your toolchain by replacing expensive legacy library managers with GitLab's searchable, version-controlled repository system, helping reduce licensing costs and maintenance overhead.\n\n**Seamless integration** with IBM Developer for z/OS Enterprise Edition (IDzEE) delivers faster software releases through dependency-based builds, automated code scanning, and comprehensive debugging tools within familiar developer environments, enhancing both quality and security.\n\n**End-to-end visibility** across mainframe and distributed environments provides comprehensive project management from planning to production, enabling automated DevOps workflows that help retain talent through modern, next-generation development tools.\n\n## Modernize your mainframe development environment today\n\nGitLab Ultimate for IBM Z is available now for organizations ready to transform their mainframe development experience. 
To learn more, visit the [GitLab and IBM partnership page](https://about.gitlab.com/partners/technology-partners/ibm/).",[283,693,9,696],[2522,2877],"Andy Bradfield","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750440008/myqt5vcjlffh8sszw507.png","2025-06-23",{"featured":91,"template":700,"slug":2881},"gitlab-ultimate-for-ibm-z-modern-devsecops-for-mainframes","content:en-us:blog:gitlab-ultimate-for-ibm-z-modern-devsecops-for-mainframes.yml","Gitlab Ultimate For Ibm Z Modern Devsecops For Mainframes","en-us/blog/gitlab-ultimate-for-ibm-z-modern-devsecops-for-mainframes.yml","en-us/blog/gitlab-ultimate-for-ibm-z-modern-devsecops-for-mainframes",{"_path":2887,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2888,"content":2893,"config":2898,"_id":2900,"_type":14,"title":2901,"_source":16,"_file":2902,"_stem":2903,"_extension":19},"/en-us/blog/gitlabs-maven-dependency-proxy-is-available-in-beta",{"title":2889,"description":2890,"ogTitle":2889,"ogDescription":2890,"noIndex":6,"ogImage":1260,"ogUrl":2891,"ogSiteName":685,"ogType":686,"canonicalUrls":2891,"schema":2892},"GitLab's Maven dependency proxy is available in Beta","Enterprises can use new package registry feature to consolidate artifact management on GitLab, increasing the efficiency and speed of CI/CD pipelines.","https://about.gitlab.com/blog/gitlabs-maven-dependency-proxy-is-available-in-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's Maven dependency proxy is available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2023-12-11\",\n      }",{"title":2889,"description":2890,"authors":2894,"heroImage":1260,"date":2895,"body":2896,"category":1062,"tags":2897},[1122],"2023-12-11","GitLab is introducing the Maven dependency proxy, a new feature that will enable enterprises to consolidate on the DevSecOps platform for artifact 
management. The Maven dependency proxy, [available in Beta](https://gitlab.com/groups/gitlab-org/-/epics/3610), enables larger organizations to be more efficient by expanding the functionality of GitLab's package registry. The new feature can make pipelines faster and more reliable, and can reduce the cost of data transfer since over time most packages will be pulled from the cache.\n\n## How the Maven dependency proxy works\n\nA typical software project relies on a variety of dependencies, which we call packages. Packages can be internally built and maintained, or sourced from a public repository. Based on our user research, we’ve learned that most projects use a 50/50 mix of public vs. private packages. When installing packages, the order in which they are found and downloaded is very important, as downloading or using an incorrect package or version of a package can introduce breaking changes and security vulnerabilities into their pipelines.\n\nThe Maven dependency proxy gives users the ability to add or configure one external Java repository. Once added, when a user tries to install a Java package using their project-level endpoint, GitLab will first look for the package in the project and if it's not found, will attempt to pull the package from the external repository.\n\nWhen a package is pulled from the external repository, it will be imported into the GitLab project so that the next time that particular package/version is pulled it's pulled from GitLab and not the external repository. If the external repository is having connectivity issues and the package is present in the dependency proxy, then pulling that package will work. This will make your pipelines faster and more reliable.\n\nIf the package changes in the external repository — for example, a user deletes a version and publishes a new one with different files — the dependency proxy will detect that and invalidate the package in GitLab to pull the \"newer\" one. 
This will ensure that the correct packages are downloaded and help to reduce security vulnerabilities. \nIf the package is not found in their GitLab project or the external repository, GitLab will return an error.\n\nHere are more details of the Maven dependency proxy:\n- This feature and all future dependency proxy formats will be in the Premium tier.\n- Project owners will be able to configure this feature via a project's settings (API or UI).\n- We will support external repositories that require authentication, such as Artifactory or Sonatype.\n\n## A fit for the enterprise\n\nEnterprise organizations that need to consolidate on GitLab and move away from Artifactory or Sonatype can make use of the new Maven dependency proxy. Virtual registries allow you to publish, proxy, and cache multiple package repositories behind a single, logical URL. \n\nThe Maven dependency proxy is the MVC of a set of features that will help enterprise organizations sunset their existing artifact management vendors, such as Artifactory or Sonatype Nexus, to help reduce costs and improve the developer user experience.\n\n#### Roadmap\n- [Finish the Maven dependency proxy](https://gitlab.com/groups/gitlab-org/-/epics/3610) (Milestone 16.7)\n- [npm dependency proxy](https://gitlab.com/groups/gitlab-org/-/epics/3608) \n- [Make the dependency proxy for containers work generically with any container registry](https://gitlab.com/groups/gitlab-org/-/epics/6061)\n- [PyPI dependency proxy](https://gitlab.com/groups/gitlab-org/-/epics/3612)\n- [NuGet dependency proxy](https://gitlab.com/groups/gitlab-org/-/epics/3611)\n\n## How we will measure success\n\nWe will start to measure success by tracking adoption by tier with the following metrics:\n\n- Number of packages pulled through the dependency proxy\n- The hit ratio (packages pulled from the cache vs. 
upstream repository)\n- Number of users that pulled a package through the dependency proxy\n\n## How to get started\n\nIn the video below, you can see a short demo of the Maven dependency proxy in action.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9NPTXObsSrE?si=MFWg5C9j5a97LBeE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Prerequisites\n\n- As of the time of writing this, the feature is behind a feature flag.\n- The settings for your project must be updated using [GraphQL](https://gitlab.com/-/graphql-explorer).\n\n> Join the Beta program by adding a comment to [this epic](https://gitlab.com/groups/gitlab-org/-/epics/3610). Note: The feature is planned to go to general availability in Version 16.7 or 16.8.\n",[1062,9,828,696],{"slug":2899,"featured":91,"template":700},"gitlabs-maven-dependency-proxy-is-available-in-beta","content:en-us:blog:gitlabs-maven-dependency-proxy-is-available-in-beta.yml","Gitlabs Maven Dependency Proxy Is Available In Beta","en-us/blog/gitlabs-maven-dependency-proxy-is-available-in-beta.yml","en-us/blog/gitlabs-maven-dependency-proxy-is-available-in-beta",{"_path":2905,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2906,"content":2912,"config":2917,"_id":2919,"_type":14,"title":2920,"_source":16,"_file":2921,"_stem":2922,"_extension":19},"/en-us/blog/gitops-next-big-thing-automation",{"title":2907,"description":2908,"ogTitle":2907,"ogDescription":2908,"noIndex":6,"ogImage":2909,"ogUrl":2910,"ogSiteName":685,"ogType":686,"canonicalUrls":2910,"schema":2911},"Is GitOps the next big thing in automation?","We polled our community on Twitter to ask about GitOps. 
Here is what we found.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681428/Blog/Hero%20Images/iac-gitops-blog-post_with-gl-logo.png","https://about.gitlab.com/blog/gitops-next-big-thing-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Is GitOps the next big thing in automation?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-07-14\",\n      }",{"title":2907,"description":2908,"authors":2913,"heroImage":2909,"date":2914,"body":2915,"category":1040,"tags":2916},[715],"2020-07-14","\n\nInfrastructure management isn’t a new problem. After all, AWS has been publicly available since 2006. While the software development lifecycle is mostly automated, infrastructure remains a largely manual process that requires specialized teams. Infrastructure needs to be elastic, and automation would make that a much easier process than it is today.\n\n[GitOps](/topics/gitops/) is an emerging technology term that could be the answer many infrastructure teams have been searching for. At its core, GitOps is a process that helps teams automate IT infrastructure through processes they already use in application development.\n\nIt’s a framework we’re excited about. 
Naturally, we took it to Twitter.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Where are YOU at with \u003Ca href=\"https://twitter.com/hashtag/GitOps?src=hash&amp;ref_src=twsrc%5Etfw\">#GitOps\u003C/a>?\u003C/p>&mdash; GitLab (@gitlab) \u003Ca href=\"https://twitter.com/gitlab/status/1277595216468418560?ref_src=twsrc%5Etfw\">June 29, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## What is GitOps?\n\nWhat makes [GitOps](/solutions/gitops/) unique is that it’s not a single product, plugin, or platform. Before we dive into what we can learn from these results, let’s define what exactly GitOps _is_.\n\nAt GitLab, we define GitOps as this:\n\n>GitOps is an operational framework that takes [DevOps](/topics/devops/) best practices used for application development such as version control, collaboration, compliance, and CI/CD, and applies them to infrastructure automation.\n\nGitOps happens in the same version control system as application development, enabling teams to collaborate more in a central location while benefiting from all the [built-in features of Git](https://devops.com/an-inside-look-at-gitops/). Infrastructure teams that practice GitOps use configuration files stored as code ([infrastructure as code](/topics/gitops/infrastructure-as-code/)).\n\nInfrastructure teams then take IaC and make changes using [merge requests](/blog/future-merge-requests-realtime-collab/) (MRs). Once changes are reviewed and approved, they are deployed using a CI/CD pipeline. 
With infrastructure changes codified, repeatable, and traceable, it leaves less room for human error and gets everyone on the same page.\n\n>GitOps = IaC + MRs + CI/CD\n\nWe thought it would be interesting to reach out to our Twitter followers to see just how many people are exploring this framework, or maybe haven’t heard of it at all. Here’s what we gleaned from our poll.\n\n## 23.8% use GitOps today\n\nWhile we have to admit that GitLab followers are probably going to be a sophisticated group, numbers like this are still very encouraging. If almost a quarter of respondents are using this new framework, it tells us that GitOps is a viable way of automating infrastructure.\n\n## 10.6% plan to implement GitOps\n\nImplementing a new process can be difficult, even for the most organized teams. GitOps allows for greater collaboration, but that is not necessarily something that comes naturally. For infrastructure teams used to making quick, manual changes, this new process is a big departure. If more than 10% of respondents are looking to get started with GitOps, we can help them understand what goes into adopting the new framework.\n\n## 11.6% have looked but not committed to GitOps\n\nThis kind of “shopping cart abandonment” differs from the type we’re most familiar with, but it has some similarities. For those that have heard of GitOps, what prevented them from implementing it and what hurdles did they anticipate?\n\nGitOps principles can be applied to all types of infrastructure automation, including VMs and containers, and can be very effective for teams looking to manage [Kubernetes clusters](/solutions/kubernetes/). But there might be some confusion on whether Kubernetes is required for GitOps (it’s not). Still, over 11% of respondents are familiar with GitOps but may not understand how it can apply to them.\n\n## 54% haven’t explored GitOps yet\n\nSince GitOps is still emerging, it’s not surprising that more than half of the respondents haven’t explored it yet. 
GitOps is an exciting topic because it offers automation using many of the same tools organizations already use, but before committing to a brand new process, it’s important for organizations to know how it works.\n\nCollaboration is part of what makes DevOps so effective, and [GitOps brings that same spirit of code collaboration into the infrastructure provisioning process](/topics/gitops/gitops-gitlab-collaboration/). Managing infrastructure through the same version control system used for application development brings a new level of transparency across the entire organization.\n\nAs we continue to explore GitOps, information like this poll lets us know where the community is in the adoption of new processes. Could GitOps be the next big thing in automation?\n\nIf you’d like to learn more about GitOps and how it works, check out this panel with GitOps experts from [Weaveworks](https://www.weave.works), [HashiCorp](https://www.hashicorp.com), [Ansible](https://www.ansible.com), and GitLab where we discuss:\n\n*   How GitOps is changing the landscape of infrastructure management\n*   What successful GitOps looks like\n*   What teams need to get started on their GitOps journey\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch GitLab's [GitOps expert panel](/why/gitops-infrastructure-automation/) webcast\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n**Read more about infrastructure:**\n\n[Why GitOps should be the workflow of choice](/blog/why-gitops-should-be-workflow-of-choice/)\n\n[How to use GitLab and Ansible to create infrastructure as code](/blog/using-ansible-and-gitlab-as-infrastructure-for-code/)\n\n[How infrastructure teams use GitLab and Terraform for 
GitOps](/topics/gitops/gitlab-enables-infrastructure-as-code/)\n",[9,721,268],{"slug":2918,"featured":6,"template":700},"gitops-next-big-thing-automation","content:en-us:blog:gitops-next-big-thing-automation.yml","Gitops Next Big Thing Automation","en-us/blog/gitops-next-big-thing-automation.yml","en-us/blog/gitops-next-big-thing-automation",{"_path":2924,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2925,"content":2931,"config":2937,"_id":2939,"_type":14,"title":2940,"_source":16,"_file":2941,"_stem":2942,"_extension":19},"/en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"title":2926,"description":2927,"ogTitle":2926,"ogDescription":2927,"noIndex":6,"ogImage":2928,"ogUrl":2929,"ogSiteName":685,"ogType":686,"canonicalUrls":2929,"schema":2930},"Go tools and GitLab: How to do continuous integration like a boss","How the team at Pantomath makes their lives easier with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Go tools and GitLab: How to do continuous integration like a boss\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julien Andrieux\"}],\n        \"datePublished\": \"2017-11-27\",\n      }",{"title":2926,"description":2927,"authors":2932,"heroImage":2928,"date":2934,"body":2935,"category":718,"tags":2936},[2933],"Julien Andrieux","2017-11-27","At [Pantomath](https://pantomath.io/), we use [GitLab](/) for all our\ndevelopment work. The purpose of this paper is not to present GitLab and all\n[its features](/pricing/feature-comparison/), but to introduce how we use these tools to ease\nour lives. So what is it all about? 
To automate everything that is related\nto your development project, and let you focus on your code.\n\n\n\u003C!-- more -->\n\n\nWe’ll cover the [lint](https://en.wikipedia.org/wiki/Lint_(software)), [unit\ntests](https://en.wikipedia.org/wiki/Unit_testing), [data\nrace](https://en.wikipedia.org/wiki/Race_condition), [memory\nsanitizer](https://clang.llvm.org/docs/MemorySanitizer.html), [code\ncoverage](https://en.wikipedia.org/wiki/Code_coverage), and build.\n\n\nAll the source code shown in this post is available at\n[gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\nSo feel free to get the repository, and use the tags to navigate in it. The\nrepository should be placed in the `src` folder of your `$GOPATH`:\n\n\n```bash\n\n$ go get -v -d gitlab.com/pantomath-io/demo-tools\n\n$ cd $GOPATH/src/gitlab.com/pantomath-io/demo-tools\n\n```\n\n\n### Go tools\n\n\nLuckily, `Go` — the open source programming language also known as golang —\ncomes with a [lot of useful tools](https://golang.org/cmd/go/), to build,\ntest, and check your code. In fact, it’s all there. We’ll just add extra\ntools to glue them together. But before we go there, we need to take them\none by one, and see what they do.\n\n\n#### Package list\n\n\nYour Go project is a collection of packages, as described in the [official\ndoc](https://golang.org/doc/code.html). Most of the following tools will be\nfed with these packages, and thus the first command we need is a way to list\nthe packages. Hopefully, the `Go` language covers our back with the `list`\nsubcommand ([read the fine\nmanual](https://golang.org/cmd/go/#hdr-List_packages) and this [excellent\npost from Dave\nCheney](https://dave.cheney.net/2014/09/14/go-list-your-swiss-army-knife)):\n\n\n```bash\n\n$ go list ./...\n\n```\n\n\nNote that we want to avoid applying our tools on external packages or\nresources, and restrict it to **our** code. 
So we need to get rid of the\n[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories):\n\n\n```bash\n\n$ go list ./... | grep -v /vendor/\n\n```\n\n\n#### Lint\n\n\nThis is the very first tool we use on the code: the linter. Its role is to\nmake sure that the code respects the code style. This may sounds like an\noptional tool, or at least a “nice-to-have” but it really helps to keep\nconsistent style over your project.\n\n\nThis linter is not part of Go *per se*, so you need to grab it and install\nit by hand (see [official doc](https://github.com/golang/lint)).\n\n\nThe usage is fairly simple: you just run it on the packages of your code\n(you can also point the `.go` files):\n\n\n```bash\n\n$ golint -set_exit_status $(go list ./... | grep -v /vendor/)\n\n```\n\n\nNote the `-set_exit_status` option. By default, `golint` only prints the\nstyle issues, and returns (with a 0 return code), so the CI never considers\nsomething went wrong. If you specify the `-set_exit_status`, the return code\nfrom `golint` will be different from 0 if any style issue is encountered.\n\n\n#### Unit test\n\n\nThese are the most common tests you can run on your code. For each `.go`\nfile, we need to have an associated `_test.go` file holding the unit tests.\nYou can run the tests for all the packages with the following command:\n\n\n```bash\n\n$ go test -short $(go list ./... | grep -v /vendor/)\n\n```\n\n\n#### Data race\n\n\nThis is usually a hard subject to cover, but the `Go` tool has it by default\n(but only available on `linux/amd64`, `freebsd/amd64`, `darwin/amd64` and\n`windows/amd64`). For more information about data race, see [this\narticle](https://golang.org/doc/articles/race_detector.html). Meanwhile,\nhere is how to run it:\n\n\n```bash\n\n$ go test -race -short $(go list ./... 
| grep -v /vendor/)\n\n```\n\n\n#### Memory sanitizer\n\n\nClang has a nice detector for uninitialized reads called\n[MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html). The `go\ntest` tool is kind enough to interact with this Clang module (as soon as you\nare on `linux/amd64` host and using a recent version of Clang/LLVM\n(`>=3.8.0`). This command is how to run it:\n\n\n```bash\n\n$ go test -msan -short $(go list ./... | grep -v /vendor/)\n\n```\n\n\n#### Code coverage\n\n\nThis is also a must have to evaluate the health of your code, and see what\nthe part of code is under unit tests and what part is not. [Rob\nPike](https://twitter.com/rob_pike) wrote a [full post on that very\nsubject](https://blog.golang.org/cover).\n\n\nTo calculate the code coverage ratio, we need to run the following script:\n\n\n```bash\n\n$ PKG_LIST=$(go list ./... | grep -v /vendor/)\n\n$ for package in ${PKG_LIST}; do\n    go test -covermode=count -coverprofile \"cover/${package##*/}.cov\" \"$package\" ;\ndone\n\n$ tail -q -n +2 cover/*.cov >> cover/coverage.cov\n\n$ go tool cover -func=cover/coverage.cov\n\n```\n\n\nIf we want to get the coverage report in HTML format, we need to add the\nfollowing command:\n\n\n```bash\n\n$ go tool cover -html=cover/coverage.cov -o coverage.html\n\n```\n\n\n#### Build\n\n\nLast but not least, once the code has been fully tested, we might want to\ncompile it to make sure we can build a working binary.\n\n\n```bash\n\n$ go build -i -v gitlab.com/pantomath-io/demo-tools\n\n```\n\n\n### Makefile\n\n\n*git tag:*\n[init-makefile](https://gitlab.com/pantomath-io/demo-tools/tags/init-makefile)\n\n\n![](https://cdn-images-1.medium.com/max/1600/1*Ip_q_6I-kNpUjuPMOutuTA.jpeg)\n\n*\u003Csmall>Photo by [Matt\nArtz](https://unsplash.com/photos/qJE5Svhs2ek?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\n\nNow 
we have all the tools that we may use in the context of continuous\nintegration, we can wrap them all in a\n[Makefile](https://gitlab.com/pantomath-io/demo-tools/blob/init-makefile/Makefile),\nand have a consistent way to call them.\n\n\nThe purpose of this doc is not to present `make`, but you can refer to\n[official documentation](https://www.gnu.org/software/make/manual/make.html)\nto learn more about it.\n\n    PROJECT_NAME := \"demo-tools\"\n    PKG := \"gitlab.com/pantomath-io/$(PROJECT_NAME)\"\n    PKG_LIST := $(shell go list ${PKG}/... | grep -v /vendor/)\n    GO_FILES := $(shell find . -name '*.go' | grep -v /vendor/ | grep -v _test.go)\n\n    .PHONY: all dep build clean test coverage coverhtml lint\n\n    all: build\n\n    lint: ## Lint the files\n      @golint -set_exit_status ${PKG_LIST}\n\n    test: ## Run unittests\n      @go test -short ${PKG_LIST}\n\n    race: dep ## Run data race detector\n      @go test -race -short ${PKG_LIST}\n\n    msan: dep ## Run memory sanitizer\n      @go test -msan -short ${PKG_LIST}\n\n    coverage: ## Generate global code coverage report\n      ./tools/coverage.sh;\n\n    coverhtml: ## Generate global code coverage report in HTML\n      ./tools/coverage.sh html;\n\n    dep: ## Get the dependencies\n      @go get -v -d ./...\n\n    build: dep ## Build the binary file\n      @go build -i -v $(PKG)\n\n    clean: ## Remove previous build\n      @rm -f $(PROJECT_NAME)\n\n    help: ## Display this help screen\n      @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nWhat do we have now? One target for any tool previously presented, and three\nmore targets for:\n\n\n* installation of dependencies (`dep`);\n\n* housekeeping of the project (`clean`);\n\n* some nice and shiny help (`help`).\n\n\nNote that we also had to create a script for the code coverage work. This is\nbecause implementing loops over files in a Makefile is a pain. 
So the work\nis done in a `bash` script, and the Makefile only triggers this script.\n\n\nYou can try the Makefile with the following commands:\n\n    $ make help\n    $ make lint\n    $ make coverage\n\n### Continuous integration\n\n\n*git tag:*\n[init-ci](https://gitlab.com/pantomath-io/demo-tools/tags/init-ci)\n\n\nNow the tools are in place, and we can run various tests on our code, we’d\nlike to automate these, on your repository. Luckily, GitLab offers [CI\npipelines](/solutions/continuous-integration/) just for this. And the setup\nfor this is pretty straightforward: all you create is a `.gitlab-ci.yml`\nfile at the root of the repository.\n\n\nThe [full documentation](https://docs.gitlab.com/ee/ci/yaml/) on this Yaml\nfile presents all the options, but you can start with this `.gitlab-ci.yml`:\n\n\n```yaml\n\nimage: golang:1.9\n\n\ncache:\n  paths:\n    - /apt-cache\n    - /go/src/github.com\n    - /go/src/golang.org\n    - /go/src/google.golang.org\n    - /go/src/gopkg.in\n\nstages:\n  - test\n  - build\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.com/pantomath-io /go/src/_/builds\n  - cp -r $CI_PROJECT_DIR /go/src/gitlab.com/pantomath-io/pantomath\n  - ln -s /go/src/gitlab.com/pantomath-io /go/src/_/builds/pantomath-io\n  - make dep\n\nunit_tests:\n  stage: test\n  script:\n    - make test\n\nrace_detector:\n  stage: test\n  script:\n    - make race\n\nmemory_sanitizer:\n  stage: test\n  script:\n    - make msan\n\ncode_coverage:\n  stage: test\n  script:\n    - make coverage\n\ncode_coverage_report:\n  stage: test\n  script:\n    - make coverhtml\n  only:\n  - master\n\nlint_code:\n  stage: test\n  script:\n    - make lint\n\nbuild:\n  stage: build\n  script:\n    - make\n```\n\n\nIf you break down the file, here are some explanations on its content:\n\n\n* The first thing is to choose what Docker image will be used to run the CI.\nHead to the [Docker Hub](https://hub.docker.com/) to choose the right image\nfor your project.\n\n* Then, you specify some 
folders of this image [to be\ncached](https://docs.gitlab.com/ee/ci/yaml/#cache). The goal here is to\navoid downloading the same content several times. Once a job is completed,\nthe listed paths will be archived, and next job will use the same archive.\n\n* You define the different `stages` that will group your jobs. In our case,\nwe have two [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) (to be\nprocessed in that order): `test` and `build`. We could have other stages,\nsuch as `deploy`.\n\n* The `before_script`\n[section](https://docs.gitlab.com/ee/ci/yaml/#before_script) defines the\ncommands to run in the Docker container right before the job is actually\ndone. In our context, the commands just copy or link the repository deployed\nin the `$GOPATH`, and install dependencies.\n\n* Then come the actual [jobs](https://docs.gitlab.com/ee/ci/jobs/), using\nthe `Makefile` targets. Note the special case for `code_coverage_report`\nwhere execution is restricted to the `master` branch (we don’t want to\nupdate the code coverage report from feature branches for instance).\n\n\nAs we commit/push the `.gitlab-ci.yml` file in the repository, the CI is\n[automatically\ntriggered](https://gitlab.com/pantomath-io/demo-tools/pipelines/13481935).\nAnd the pipeline fails. 
Howcome?\n\n\nThe `lint_code`\n[job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690212) fails\nbecause it can’t find the `golint` binary:\n\n\n```bash\n\n$ make lint\n\nmake: golint: Command not found\n\nMakefile:11: recipe for target 'lint' failed\n\nmake: *** [lint] Error 127\n\n```\n\n\nSo,\n[update](https://gitlab.com/pantomath-io/demo-toolscommit/17a0206eb626504e559f56773e2d81c7b5808dbe)\nyour `Makefile` to install `golint` as part of the `dep` target.\n\n\nThe `memory_sanitizer`\n[job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690209) fails\nbecause `gcc` complains:\n\n\n```bash\n\n$ make msan\n\n# runtime/cgo\n\ngcc: error: unrecognized argument to -fsanitize= option: 'memory'\n\nMakefile:20: recipe for target 'msan' failed\n\nmake: *** [msan] Error 2\n\n```\n\n\nBut remember we need to use Clang/LLVM `>=3.8.0` to enjoy the `-msan` option\nin `go test` command.\n\n\nWe have two options here:\n\n\n* either we set up Clang in the job (using `before_script`);\n\n* or we use a Docker image with Clang installed by default.\n\n\nThe first option is nice, but that implies to have this setup done **for\nevery single job**. This is going to be so long, we should do it once and\nfor all. 
So we prefer the second option, which is a good way to play with\n[GitLab\nRegistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\n\n*git tag:*\n[use-own-docker](https://gitlab.com/pantomath-io/demo-tools/tags/use-own-docker)\n\n\nWe need to create a\n[Dockerfile](https://gitlab.com/pantomath-io/demo-tools/blob/use-own-docker/Dockerfile)\nfor the container (as usual: read the [official\ndocumentation](https://docs.docker.com/engine/reference/builder) for more\noptions about it):\n\n    # Base image:\n    FROM golang:1.9\n    MAINTAINER Julien Andrieux \u003Cjulien@pantomath.io>\n\n    # Install golint\n    ENV GOPATH /go\n    ENV PATH ${GOPATH}/bin:$PATH\n    RUN go get -u github.com/golang/lint/golint\n\n    # Add apt key for LLVM repository\n    RUN wget -O -\n     | apt-key add -\n\n    # Add LLVM apt repository\n    RUN echo \"deb\n     llvm-toolchain-stretch-5.0 main\" | tee -a /etc/apt/sources.list\n\n    # Install clang from LLVM repository\n    RUN apt-get update && apt-get install -y --no-install-recommends \\\n        clang-5.0 \\\n        && apt-get clean \\\n        && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n    # Set Clang as default CC\n    ENV set_clang /etc/profile.d/set-clang-cc.sh\n    RUN echo \"export CC=clang-5.0\" | tee -a ${set_clang} && chmod a+x ${set_clang}\n\nThe container built out of this Dockerfile will be based on\n[golang:1.9](https://hub.docker.com/_/golang/) image (the one referenced in\nthe `.gitlab-ci.yml` file).\n\n\nWhile we’re at it, we install `golint` in the container, so we have it\navailable. 
Then we follow [official way](http://apt.llvm.org/) of installing\nClang 5.0 from LLVM repository.\n\n\nNow we have the Dockerfile in place, we need to build the container image\nand make it available for GitLab:\n\n\n```bash\n\n$ docker login registry.gitlab.com\n\n$ docker build -t registry.gitlab.com/pantomath-io/demo-tools .\n\n$ docker push registry.gitlab.com/pantomath-io/demo-tools\n\n```\n\n\nThe first command connects you to the GitLab Registry. Then you build the\ncontainer image described in the Dockerfile. And finally, you push it to the\nGitLab Registry.\n\n\nTake a look at the [Registry for your\nrepository](https://gitlab.com/pantomath-io/demo-tools/container_registry),\nyou’ll see your image, ready to be used. And to have the CI using your\nimage, you just need to update the `.gitlab-ci.yml` file:\n\n    image: golang:1.9\n\nbecomes\n\n    image: registry.gitlab.com/pantomath-io/demo-tools:latest\n\nOne last detail: you need to tell the CI to use the proper compiler (i.e.\nthe `CC` environment variable), so we add the variable initialization in the\n`.gitlab-ci.yml` file:\n\n    export CC=clang-5.0\n\nOnce the modification are done, next commit will trigger the pipeline, which\nnow works:\n\n\n[gitlab.com/pantomath-io/demo-tools/pipelines/13497136](https://gitlab.com/pantomath-io/demo-tools/pipelines/13497136)\n\n\n### Badges\n\n\n*git tag:*\n[init-badges](https://gitlab.com/pantomath-io/demo-tools/tags/init-badges)\n\n\n![](https://cdn-images-1.medium.com/max/1600/1*0pY_6oCiHZ_eLh0vfg5rDA.jpeg)\n\n\n*\u003Csmall>Photo by [Jakob\nOwens](https://unsplash.com/photos/ZBadHaTUkP0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\n\nNow the tools are in place, every commit will launch a test suite, and you\nprobably want to show it, and that’s legitimate :) The best way to do so is\nto use badges, and the best place 
for it is the `README`\n[file](https://gitlab.com/pantomath-io/demo-tools/blob/init-badges/README.md).\n\n\nEdit it and add the four following badges:\n\n\n* Build Status: the status of the last pipeline on the `master` branch:\n\n\n```\n\n[![Build\nStatus](https://gitlab.com/pantomath-io/demo-tools/badges/master/build.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n\n```\n\n\n* Coverage Report: the percentage of source code covered by tests\n\n\n```\n\n[![Coverage\nReport](https://gitlab.com/pantomath-io/demo-tools/badges/master/coverage.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n\n```\n\n\n* Go Report Card:\n\n\n```\n\n[![Go Report\nCard](https://goreportcard.com/badge/gitlab.com/pantomath-io/demo-tools)](https://goreportcard.com/report/gitlab.com/pantomath-io/demo-tools)\n\n```\n\n\n* License:\n\n\n```\n\n[![License\nMIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://img.shields.io/badge/License-MIT-brightgreen.svg)\n\n```\n\n\nThe coverage report needs a special configuration. You need to tell GitLab\nhow to get that information, considering that there is a job in the CI that\n*displays* it when it runs.\u003Cbr> There is a\n[configuration](https://gitlab.com/help/user/project/pipelines/settings#test-coverage-parsing)\nto provide GitLab with a regexp, used in any job’ output. If the regexp\nmatches, GitLab consider the match to be the code coverage result.\n\n\nSo head to `Settings > CI/CD` in your repository, scroll down to the `Test\ncoverage parsing` setting in the `General pipelines settings` section, and\nuse the following regexp:\n\n    total:\\s+\\(statements\\)\\s+(\\d+.\\d+\\%)\n\nYou’re all set! Head to the [overview of your\nrepository](https://gitlab.com/pantomath-io/demo-tools/tree/init-badges),\nand look at your `README`:\n\n\n### Conclusion\n\n\nWhat’s next? Probably more tests in your CI. 
You can also look at the CD\n([Continuous\nDeployment](https://docs.gitlab.com/ee/ci/environments/index.html)) to\nautomate the deployment of your builds. The documentation can be done using\n[GoDoc](https://godoc.org/-/about). Note that you generate a coverage report\nwith the `code_coverage_report`, but don’t use it in the CI. You can make\nthe job copy the HTML file to a web server, using `scp` (see this\n[documentation](https://docs.gitlab.com/ee/ci/ssh_keys/) on how to use SSH\nkeys).\n\n\nMany thanks to [Charles Francoise](https://dev.to/loderunner) who co-wrote\nthis paper and\n[gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\n\n\n## About the Guest Author\n\n\nJulien Andrieux is currently working on Pantomath. Pantomath is a modern,\nopen source monitoring solution, built for performance, that bridges the\ngaps across all levels of your company. The wellbeing of your infrastructure\nis everyone’s business. [Keep up with the project](http://goo.gl/tcxtXq).\n\n *[Go tools & GitLab - how to do Continuous Integration like a boss](https://medium.com/pantomath/go-tools-gitlab-how-to-do-continuous-integration-like-a-boss-941a3a9ad0b6) was originally published on Medium.*\n\n*Cover photo by [Todd\nQuackenbush](https://unsplash.com/photos/IClZBVw5W5A?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)*\n\n{: .note}\n",[9,763,917],{"slug":2938,"featured":6,"template":700},"go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","content:en-us:blog:go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","Go Tools And Gitlab How To Do Continuous Integration Like A 
Boss","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"_path":2944,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2945,"content":2951,"config":2956,"_id":2958,"_type":14,"title":2959,"_source":16,"_file":2960,"_stem":2961,"_extension":19},"/en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"title":2946,"description":2947,"ogTitle":2946,"ogDescription":2947,"noIndex":6,"ogImage":2948,"ogUrl":2949,"ogSiteName":685,"ogType":686,"canonicalUrls":2949,"schema":2950},"Goldman Sachs partners with GitLab for next-gen platform strategies","Goldman Sachs’ George Grant shares how partnering with GitLab has modernized the development ecosystem.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671845/Blog/Hero%20Images/serverless-ops-blog.jpg","https://about.gitlab.com/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Goldman Sachs partners with GitLab for next-gen platform strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2020-01-24\",\n      }",{"title":2946,"description":2947,"authors":2952,"heroImage":2948,"date":2953,"body":2954,"category":1040,"tags":2955},[823],"2020-01-24","\n\nMost people know Goldman Sachs as the global investment banking giant, but over the past few years the company has branched out to some pretty modern applications that go beyond the standard financial firm. 
At GitLab Commit Brooklyn 2019, [George Grant](https://www.linkedin.com/in/george-grant-21a9624), who runs the US SDLC engineering team at Goldman Sachs, explained how they’ve partnered with GitLab to help transform not only their development but the company as a whole.\n\n“It means we have to be a lot more nimble than we were in the past,” Grant says. “Now that we’re developing things that run on people’s iPhones, you need to have a different sort of infrastructure to do that.” The SDLC engineering team drives strategies for the development team, including legacy products, but also newer platforms like budgeting applications and the latest Apple credit card. The team is at the center of every business move within the organization.\n\n## Getting past the “dark times”\nGolman Sachs has about 10 [SDLCs running](/platform/), having grown organically into its own ecosystem over the years for various purposes. “Many of the things that we have at GS were designed in house – its our own workflow, our own tools doing code reviews, surrounding a minimum amount of external tools. Everthing thats involved in it is very tightly coupled with everything else,” Grant says.\n\nThe deployments, the issue tracker, the builds, and the testing are all linked together in order for everything to be controlled in one environment, including regulatory and compliance. This workflow is comfortable and controlled for users, but not ideal. “The problem is, it is sort of simultaneously its greatest strength and greatest weakness because the tightness of the coupling of the components makes it very difficult to replace any of the ones,” Grant says. If any part of the environment needs to be updated or switched out, it impacts all the others.\n\n\n\nThe engineering team started researching a new strategic direction, primarily looking for a modern Git-based solution. 
The goal was to find a tool that could alleviate developers’ SDLC workload and provide critical strategies for [cloud and Kubernetes](/2017/11/30/containers-kubernetes-basics/), allowing people to move away from the legacy stack. “You actually want to have something that gives you the freedom to innovate, but still have that control level around it.”\n\n## Creating a roadmap with GitLab\nGoldman Sachs chose GitLab as a way to move to the cloud, as an automation tool and to ultimately become the center of the ecosystem. “We didn’t want GitLab to be an island,” Grant says. Within the first two weeks of introducing GitLab, there were over 1600 users, underscoring the push for a new strategic platform.\n\nGitLab users can be innovative without restrictions. Each user group continues to work in their own world of tooling, but in a highly regulated environment. Reduced cycle times are another benefit, according to Grant. “We have one team that used to only be able to do a release every two weeks. Now they can do one and do another one five minutes later if they want to,” he says.\n\nFor an experienced company, the ability to integrate with legacy tools is important. On top of that, GS is embracing DevOps and QA metrics now that they have end-to-end visibility within the ecosystem. The transparency of GitLab allows Goldman Sachs to have input. “We have new ideas and new ways that we want to use the product to drive it strategically within GS,” Grant says.\n\n## Goldman Sachs and GitLab: Better together\nGoldman Sachs and GitLab have established a partnership. “The proof is in the pudding, as they say, and Goldman Sachs was very, very happy to become an investor in GitLab,” Grant says. As users of the tool, Goldman Sachs found it to be a natural investment opportunity. Bottom line, he says, people are demanding to use it more often. 
“We believe it is the strategic platform to take us into the future.”\n\nTo learn more about Goldman Sach’s implementation strategies, watch George Grant’s presentation from GitLab Commit Brooklyn 2019.\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Bu3nrxPy1-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Tomasz Frankowski](https://unsplash.com/@sunlifter?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[763,9,721,1105,268],{"slug":2957,"featured":6,"template":700},"goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies","content:en-us:blog:goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","Goldman Sachs Partners With Gitlab For Next Gen Platform Strategies","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies.yml","en-us/blog/goldman-sachs-partners-with-gitlab-for-next-gen-platform-strategies",{"_path":2963,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2964,"content":2969,"config":2976,"_id":2978,"_type":14,"title":2979,"_source":16,"_file":2980,"_stem":2981,"_extension":19},"/en-us/blog/google-gitlab-serverless-webinar",{"title":2965,"description":2966,"ogTitle":2965,"ogDescription":2966,"noIndex":6,"ogImage":2853,"ogUrl":2967,"ogSiteName":685,"ogType":686,"canonicalUrls":2967,"schema":2968},"Container apps on serverless: Write once, deploy anywhere","Containers, serverless, and microservices, oh my! 
Cut to the chase and learn how to write apps once and deploy anywhere with emerging technologies.","https://about.gitlab.com/blog/google-gitlab-serverless-webinar","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write once, deploy anywhere: Containerized applications on modern serverless platforms\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":2970,"description":2966,"authors":2971,"heroImage":2853,"date":2973,"body":2974,"category":300,"tags":2975},"Write once, deploy anywhere: Containerized applications on modern serverless platforms",[2972],"Tina Sturgis","2019-06-13","\n\nUsing containers has become standard practice in app development today. We all get the value of why you want to build with containers. But as a developer, why should you care about [serverless](/topics/serverless/)? It’s simple, you can eliminate worry about the infrastructure that your app is going to run on and focus on the impact of the app itself. Specifically the business logic of how the app will interact with things like the end users and/or operating systems.\n\nThe concepts of serverless quickly move the conversation towards one around a microservices architecture. As we move away from building applications in a monolith, moving towards serverless and eliminating the need to worry about that infrastructure begin to make a lot more sense.\n\nSo now, how do we take these concepts that we hear and/or read about that increase velocity, flexibility, and scalability, and put them into action for your own application development?\n\nFind out at our webinar, \"Running containerized applications on modern serverless platforms\" on Jun. 25, 2019 with GitLab and Google experts. 
We'll take a deep dive into how new and emerging technologies like Kubernetes, Knative, Cloud Run, and GitLab Serverless can provide great stability and scalability while lowering costs and increasing the pace of innovation.\n\n[Reserve your spot.](https://webinars.devops.com/running-containerized-applications-on-modern-serverless-platforms)\n{: .alert .alert-gitlab-purple .text-center}\n",[939,721,232,9,1228],{"slug":2977,"featured":6,"template":700},"google-gitlab-serverless-webinar","content:en-us:blog:google-gitlab-serverless-webinar.yml","Google Gitlab Serverless Webinar","en-us/blog/google-gitlab-serverless-webinar.yml","en-us/blog/google-gitlab-serverless-webinar",{"_path":2983,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2984,"content":2989,"config":2994,"_id":2996,"_type":14,"title":2997,"_source":16,"_file":2998,"_stem":2999,"_extension":19},"/en-us/blog/google-next-post",{"title":2985,"description":2986,"ogTitle":2985,"ogDescription":2986,"noIndex":6,"ogImage":1655,"ogUrl":2987,"ogSiteName":685,"ogType":686,"canonicalUrls":2987,"schema":2988},"What to check out at Google Cloud Next 2019","Support women who code by stopping by our booth, learn from a host of GitLab experts, and more.","https://about.gitlab.com/blog/google-next-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What to check out at Google Cloud Next 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-04-04\",\n      }",{"title":2985,"description":2986,"authors":2990,"heroImage":1655,"date":2991,"body":2992,"category":300,"tags":2993},[2858],"2019-04-04","\n\nIt’s that time of the year to indulge in all things innovative and new at Google Cloud Next 2019.\nAs an attendee last year, I was excited to learn about Google’s vision on ‘bringing the cloud to you’\nwith a focus on hybrid cloud and unveiling of GKE On-Prem. 
GitLab’s partnership with Google\nhas grown a lot since we launched our quick and easy [integration with GKE](/partners/technology-partners/google-cloud-platform/)\nlast year and we hope you will come out to see some of the new things we have going on.\n\n### Don't be shy, come say hi 👋\n\nCome visit us at our booth (#S1607), get scanned, and GitLab will donate $5 to your\ncharity of choice: [Rail Girls](http://railsgirls.com/) or [Django Girls](https://djangogirls.org/).\nThis also enters you for a chance to win an iPad Pro!\n\nWhile you're there, we would love to showcase and talk about:\n\n* GitLab’s [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) functionality.\n* Using GitLab to [secure your applications](/stages-devops-lifecycle/secure/).\n* How to get started with [GitLab for GCP on GKE](/partners/technology-partners/google-cloud-platform/) and GKE On-Prem.\n* GitLab [Serverless with Knative](/topics/serverless/) and [Cloud Run](https://cloud.google.com/blog/products/serverless/announcing-cloud-run-the-newest-member-of-our-serverless-compute-stack),\n* ... and much more!\n\n### Sit back, relax, and listen to some of our experts live\n\n* Check out [Brandon Jung](/company/team/#brandoncjung) (VP of Alliances) discuss [GitLab’s move from Azure to GCP](https://cloud.withgoogle.com/next/sf/sessions?session=ARC207) which includes a technical\noverview of the migration as well as lessons learned. Check out our customer case study [here](https://cloud.google.com/customers/gitlab/).\n\n* Come listen to [Kathy Wang](/company/team/#wangkathy) (Senior Director of Security) tell our journey [Towards Zero Trust at GitLab.com](https://cloud.withgoogle.com/next/sf/sessions?session=SEC220) along with key lessons learned. 
([You can read more about the evolution of Zero Trust here](/blog/evolution-of-zero-trust/).)\n\n* Learn something new with [Daniel Gruesso](/company/team/#danielgruesso) (Product Manager) showcasing GitLab’s serverless functionality to [Run a consistent serverless platform anywhere with Kubernetes and Knative](https://cloud.withgoogle.com/next/sf/sessions?session=HYB218).\n\n### Get hands on with Qwiklabs\n\nLearn from [Dan Gordon](/company/team/#dbgordon) (Senior Technical Marketing Manager) at our [Spotlight Lab: Introduction to GitLab on GKE](https://cloud.withgoogle.com/next/sf/sessions?session=301353-133371). Here you will have the chance to deploy GitLab on GKE, migrate a GitHub repository into a GitLab Project, and set up a CI/CD pipeline with AutoDevOps to deploy your code to GKE.\n\nSo stop by and say hello!\n\nWe are proud to be a sponsor at this event and would love to see as many of you at our booth (S1607) to discuss GitLab [Serverless](/topics/serverless/) with Knative and Cloud Run, GitLab’s integration with GKE, GitLab AutoDevOps for CI/CD, Security functionalities, as well as GitLab’s support for GKE On-Prem.\n",[999,1228,9,721,232,827,697,874],{"slug":2995,"featured":6,"template":700},"google-next-post","content:en-us:blog:google-next-post.yml","Google Next Post","en-us/blog/google-next-post.yml","en-us/blog/google-next-post",{"_path":3001,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3002,"content":3008,"config":3015,"_id":3017,"_type":14,"title":3018,"_source":16,"_file":3019,"_stem":3020,"_extension":19},"/en-us/blog/group-conversation-podcast",{"title":3003,"description":3004,"ogTitle":3003,"ogDescription":3004,"noIndex":6,"ogImage":3005,"ogUrl":3006,"ogSiteName":685,"ogType":686,"canonicalUrls":3006,"schema":3007},"How we turn our group conversations into a podcast with GitLab CI/CD","Want to listen to meetings on the go? 
Senior SRE John Jarvis explains how he turned his favorite remote meetings at GitLab into podcast format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678626/Blog/Hero%20Images/group-conversation-podcast.jpg","https://about.gitlab.com/blog/group-conversation-podcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we turn our group conversations into a podcast with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jarvis\"}],\n        \"datePublished\": \"2019-07-03\",\n      }",{"title":3003,"description":3004,"authors":3009,"heroImage":3005,"date":3011,"body":3012,"category":718,"tags":3013},[3010],"John Jarvis","2019-07-03","\n[Group conversations](/handbook/group-conversations/) are my favorite remote meetings at\nGitLab because they are a great way to get an inside peek at what different teams are doing,\nhow they collaborate, and what features you might find in future GitLab releases.\nYou may already know that we have been livestreaming these on\n[GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) for anyone curious about how GitLab operates.\n\nLately, when I have time to listen to these unfiltered discussions I am either not at a screen or not in a place\nwhere it is easy to watch a video. 
After seeing how [Support turned their weekly meeting into a podcast](/blog/how-we-turned-40-person-meeting-into-a-podcast/),\nI thought it would be nice to make the GitLab group conversation meetings into a podcast as well!\n\n[Subscribe to the GitLab Group Conversations podcast](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts)\n{: .alert .alert-gitlab-purple .text-center}\n\nNow in addition to the livestreams and videos, there is a podcast feed for GitLab group conversations.\nListen to these conversations on your favorite podcast player by accessing the feed on\n[the Group Conversations podcast page](https://gitlab-com.gitlab.io/gl-infra/podcasts/#podcasts).\n\nIf you like the format, please let us know by tweeting us [@GitLab](https://twitter.com/gitlab)\nand we will consider adding more!\n\n### Here is a bit more detail about how these podcasts are generated\n\n* Teams that livestream group conversations\n  [follow instructions  for broadcasting it live](/handbook/group-conversations/#livestream-the-video)\n  and creating the video. When the meeting is over, the video is made available on GitLab Unfiltered.\n\n* A daily GitLab CI job in the [podcasts project](https://gitlab.com/gitlab-com/gl-infra/podcasts)\n  downloads the group conversation videos and converts them to audio files. 
It's easy to create [pipeline schedules in GitLab](https://docs.gitlab.com/ee/ci/pipelines/schedules.html).\n\n  ![The podcast schedule](https://about.gitlab.com/images/blogimages/podcast-schedule.png){: .shadow.medium.center}\n\n* An RSS feed is generated and audio files are uploaded to object storage from the CI job\n\n* GitLab pages is used to host a static site to link to the feed\n\n* This is all automated in a CI pipeline that runs every hour!\n\n![Podcast pipelines](https://about.gitlab.com/images/blogimages/podcast-pipeline.png){: .shadow.medium.center}\n\nI hope you have the opportunity to tune into the group conversations at GitLab and\nalso take advantage of GitLab CI features like schedules to help automate your own\nworkflows!\n\nPhoto by [Lee Campbell](https://unsplash.com/@leecampbell?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/headphones?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1064,3014,9],"remote work",{"slug":3016,"featured":6,"template":700},"group-conversation-podcast","content:en-us:blog:group-conversation-podcast.yml","Group Conversation Podcast","en-us/blog/group-conversation-podcast.yml","en-us/blog/group-conversation-podcast",{"_path":3022,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3023,"content":3028,"config":3033,"_id":3035,"_type":14,"title":3036,"_source":16,"_file":3037,"_stem":3038,"_extension":19},"/en-us/blog/guide-to-ci-cd-pipelines",{"title":3024,"description":3025,"ogTitle":3024,"ogDescription":3025,"noIndex":6,"ogImage":2088,"ogUrl":3026,"ogSiteName":685,"ogType":686,"canonicalUrls":3026,"schema":3027},"A quick guide to GitLab CI/CD pipelines","How GitLab is making a better pipeline with Auto DevOps.","https://about.gitlab.com/blog/guide-to-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A quick guide to 
GitLab CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-07-12\",\n      }",{"title":3024,"description":3025,"authors":3029,"heroImage":2088,"date":3030,"body":3031,"category":1040,"tags":3032},[715],"2019-07-12","\nTo be successful with [DevOps](https://about.gitlab.com/topics/devops/), teams must use [automation](https://docs.gitlab.com/ee/topics/autodevops/), and [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/) are a big part of that journey. At its most basic level, a pipeline gets code from point A to point B. The quicker and more efficient the pipeline is, the better it will accomplish this task.\n## What is a CICD pipeline?\n\nA pipeline is the lead component of continuous integration, delivery, and deployment. It drives software development through building, testing and deploying code in stages. Pipelines are comprised of jobs, which define what will be done, such as compiling or testing code, as well as stages that spell out when to run the jobs. An example would be running tests after stages that compile the code.\n\nA CI/CD pipeline automates steps in the SDLC such as builds, tests, and deployments. When a team takes advantage of automated pipelines, they simplify the handoff process and decrease the chance of human error, creating faster iterations and better quality code. Everyone can see where code is in the process and identify problems long before they make it to production.\n\nBefore we dive in, let's cover some basics:\n\n## The GitLab pipeline glossary\n\n**Commit**: A code change.\n\n**Job**: Instructions that a runner has to execute.\n\n**Pipeline**: A collection of jobs split into different stages.\n\n**Runner**: An agent or server that executes each job individually that can spin up or down as needed.\n\n**Stages**: A keyword that defines certain stages of a job, such as `build` and `deploy`. 
Jobs of the same stage are executed in parallel.\nPipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, within the root of a project. From there, you can set up parameters of your pipeline:\n\n*   What to execute using [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/#configuring-gitlab-runners)\n*   What happens when a process succeeds or fails\n\nNot all jobs are so simple. For larger products that require cross-project interdependencies, such as those adopting a [microservices architecture](/blog/strategies-microservices-architecture/), there are [multi-project pipelines](/blog/use-multiproject-pipelines-with-gitlab-cicd/).\n\n![multi-project pipelines](https://about.gitlab.com/images/topics/multi-project_pipelines.png){: .shadow.medium.center }\n\nIn GitLab 9.3 we made it possible to display links for upstream and downstream projects directly on the pipeline graph, so developers can check the overall status of the entire chain in a single view. Pipelines continue to evolve, and in our [CI/CD product vision](https://about.gitlab.com/direction/ops/) we’re looking into making pipelines even more cohesive by implementing [Multiple Pipelines in a single `.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972) in the future.\n\n## Pipeline as code\n\nDefining deployment pipelines through source code such as Git, is known as pipeline as a code. The pipeline as code practice is part of a larger “as code” movement that includes infrastructure as code. Teams can configure builds, tests, and deployment in code that is trackable and stored in a centralized source repository. They can use a declarative YAML approach or a vendor-specific programming language, such as Jenkins and Groovy, but the premise remains the same.\n\nA pipeline as code file specifies the stages, jobs, and actions for a pipeline to perform. 
Because the file is versioned, changes in pipeline code can be tested in branches with the corresponding application release.\n\nThe pipeline as code model of creating continuous integration pipelines is an industry best practice. There are multiple benefits, such as the ability to store CI pipelines and application code in the same repository. Developers can also make changes without additional permissions, working with tools they’re already using.\n\nOther benefits are more efficient collaboration and the ability to keep information accessible so team members can act on their decisions. Pipeline changes are subject to a code review process, avoiding any break in the pipeline migration.\n\nDeployment pipelines are in a version control system independent of continuous integration tools. Pipelines can be restored if the continuous integration system goes down. If a team wants to switch CI tools at another point, pipelines can be moved into a new system.\n\nIn the early iterations of [CI/CD](/topics/ci-cd/), DevOps tools set up pipelines as point-and-click or through a GUI. This originally presented a number of challenges:\n\n*   Auditing was limited to what was already built in\n*   Unable to collaborate\n*   Difficulty troubleshooting\n\nSomething as simple as rolling back to the last known config was an exercise in futility. CI/CD pipelines during this time were prone to breaking, lacked visibility, and were difficult to change.\n\nThe pipeline as code model corrected a lot of these pain points and offered the flexibility teams needed to execute efficiently. With source code, teams could use Git to search and introspect changes.\n\nToday, many tools have adopted YAML configuration as a best practice. GitLab CI/CD has used code, rather than GUI, since the beginning for pipeline configuration. 
\"Pipeline as code\" comes with many of the same benefits the other \"as code\" trends have:\n\n*   **Version control** – keep track of changes over time and revert to previous configurations easily\n*   **Audit trails** – know when and what changes were made to the source code\n*   **Ease of collaboration** – code is available to the team for improvements, suggestions, and updates\n*   **Knowledge sharing** – import templates and code snippets so teams can share best practices\n*   **Built-in Lint tool** – ensures YAML file is valid and assists new users\n\nThe principles of software development apply not only to the applications we deliver but also to _how_ we build them. The pipeline as code model creates automated processes that help developers build applications better and faster. Having everything documented in a source repository allows for greater visibility and collaboration so that everyone can continually improve processes, which is what DevOps is all about.\n\n## What are the different stages of a GitLab CI/CD pipeline?\n\nPipelines are comprised of jobs, which define _what_ to do, such as compiling or testing code; stages, which define _when_ to run the jobs; and runners, which are agents or servers that execute each job, and can spin up or down as needed.\n\nPipelines are generally executed automatically and don’t need any intervention once they are created. \n\nA typical pipeline generally consists of a few stages in the following order:\n\n### Test\nThe test stage is where the code is assess to ensure there are no bugs and it is working the way it was designed to before it reaches end users. The test stage has a job called deploy-to stage. Unit testing on small, discrete functions of the source may also done. All unit tests running against a code base are required to pass. 
If they don’t that creates a risk that must be addressed right away.\n\n### Deploy\nThe staging stage has a job called deploy-to-stage, where a team can conduct further tests and validation. It is followed by a production stage with a job called deploy-to-production. If the code passes a series of automated tests, often the build will automatically deploy. [The endpoint is typically pre-production deployment](https://www.techtarget.com/searchsoftwarequality/CI-CD-pipelines-explained-Everything-you-need-to-know). Once the build’s integrity is completely validated by stakeholders, it can be deployed to an actual production environment. Once the build passes pre-deployment testing, in a continuous deployment pipeline, it is automatically deployed to production.Then, it is monitored. To do so effectively requires collecting and [analyzing metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/) such as deployment frequency, deployment time and lead time for changes.\n\n## How do I set up a GitLab CI/CD pipeline?\nPipeline templates are useful because writing them from scratch is a time-consuming and onerous process. GitLab has pipeline templates for more than 30 popular programming languages and frameworks. Templates to help you get started can be found in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\nA GitLab pipeline executes several jobs, stage by stage, with the help of automated code.\n\nA continuous integration pipeline involves building something from the scratch and testing the same in a development environment. It might occur to the developers to add something after building the application and pushing it into production. 
This can be done with the help of continuous integration where we can add the code even after it is deployed.\n\nThis phase includes testing as well where we can test with different approaches in the code.\n\n### CD Pipeline prerequisites \nTo get started, you need to set up an [Ubuntu 18.04 server](https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04) along with a sudo non-root user and firewall. You also need at least 1 GB RAM and 1 CPU.\n\n[Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04) must be installed on the server.\nA user account on a GitLab instance with an enabled container registry. The free plan of the [official GitLab instance](https://gitlab.com/) meets the requirements. You can also host your own GitLab instance by following the [How To Install and Configure GitLab on Ubuntu 18.04 guide](https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-gitlab-on-ubuntu-18-04).\nThen you should create a GitLab project, adding an HTML file to it. Later, you’ll copy the HTML file into an Nginx Docker image, which in turn, you will deploy to the server.\n\n1. Log in to your GitLab instance and click new project.\n2. Give it a proper Project name.\n3. Optionally add a Project description.\n4. Make sure to set the Visibility Level to Private or Public depending on your requirements.\n5. Finally click Create project\n\n## Building better pipelines with Auto DevOps\n\nCI/CD pipelines have automated so much of the development process, however, it will still take time to do the initial work of building and configuring them in your environment. But what if you aren’t sure what all the parts of your CI/CD pipeline should be? 
What are the best practices you should know at every stage?\n\nIn the past, there have only been two choices: Time-consuming configuration from scratch with complete customization, or an easier auto-configuration with much less flexibility. Developers have longed for the moment where they could click a button and have a complete pipeline with code quality, language detection, and all scripts included with very little manual work.\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is our solution to this problem. It is a pre-built, fully-featured CI/CD pipeline that automates the entire delivery process. Instead of having to choose between time and flexibility, GitLab offers both. In addition to the Auto DevOps template, GitLab offers several CI templates that can be modified as necessary, or you can override specific settings. Want all the power of Auto DevOps for a custom test job? Just override the `script` block for the `test` job and give it a try. Since templates are also modular, teams have the option to pull in only the parts they need.\n\nWe hope this blog post gives you some insight into how we approach pipeline as code and our larger vision for how we’re improving the CI/CD pipeline experience in the future. 
Automated pipelines increase development speed and improve code quality, and we’re actively working on making them even better and easier to use.\n\nCover image by [Gerrie van der Walt](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,875,9],{"slug":3034,"featured":91,"template":700},"guide-to-ci-cd-pipelines","content:en-us:blog:guide-to-ci-cd-pipelines.yml","Guide To Ci Cd Pipelines","en-us/blog/guide-to-ci-cd-pipelines.yml","en-us/blog/guide-to-ci-cd-pipelines",{"_path":3040,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3041,"content":3047,"config":3053,"_id":3055,"_type":14,"title":3056,"_source":16,"_file":3057,"_stem":3058,"_extension":19},"/en-us/blog/hosted-runners-for-gitlab-dedicated-available-in-beta",{"title":3042,"description":3043,"ogTitle":3042,"ogDescription":3043,"noIndex":6,"ogImage":3044,"ogUrl":3045,"ogSiteName":685,"ogType":686,"canonicalUrls":3045,"schema":3046},"Hosted Runners for GitLab Dedicated available in Beta","GitLab Dedicated customers can now scale their CI/CD workloads with no maintenance overhead.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663948/Blog/Hero%20Images/dedicatedcoverimage.png","https://about.gitlab.com/blog/hosted-runners-for-gitlab-dedicated-available-in-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Hosted Runners for GitLab Dedicated available in Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabian Zimmer\"}],\n        \"datePublished\": \"2024-01-31\",\n      }",{"title":3042,"description":3043,"authors":3048,"heroImage":3044,"date":3050,"body":3051,"category":1062,"tags":3052},[3049],"Fabian Zimmer","2024-01-31","Managing fleets of runners can be complex and requires significant 
experience to ensure all CI/CD jobs can scale to meet the demands of developers. Hosted Runners for GitLab Dedicated, now available in Beta, allows customers to use runners that are fully managed by GitLab for CI/CD jobs running on GitLab Dedicated.\n\nHosted Runners for GitLab Dedicated brings the same flexibility, efficiency, and control of GitLab Dedicated to runners. The Beta release includes the following features:\n- Linux-based runners at the instance level\n- Complete isolation from other tenants, following the same principles as GitLab Dedicated\n- Auto-scaling\n- Fully managed by GitLab\n\nAdditional features will be included based on customer demand leading up to limited and general availability.\n\nAs we develop this new feature, we are making Hosted Runners for GitLab Dedicated available upon invitation for existing GitLab Dedicated customers. Please reach out to your Customer Success Manager or [contact sales](https://about.gitlab.com/sales/). You can learn more about Gitlab Dedicated [on our website](https://about.gitlab.com/dedicated/).",[695,693,9],{"slug":3054,"featured":6,"template":700},"hosted-runners-for-gitlab-dedicated-available-in-beta","content:en-us:blog:hosted-runners-for-gitlab-dedicated-available-in-beta.yml","Hosted Runners For Gitlab Dedicated Available In Beta","en-us/blog/hosted-runners-for-gitlab-dedicated-available-in-beta.yml","en-us/blog/hosted-runners-for-gitlab-dedicated-available-in-beta",{"_path":3060,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3061,"content":3067,"config":3072,"_id":3074,"_type":14,"title":3075,"_source":16,"_file":3076,"_stem":3077,"_extension":19},"/en-us/blog/hosted-runners-for-gitlab-dedicated-now-in-limited-availability",{"title":3062,"description":3063,"ogTitle":3062,"ogDescription":3063,"noIndex":6,"ogImage":3064,"ogUrl":3065,"ogSiteName":685,"ogType":686,"canonicalUrls":3065,"schema":3066},"Hosted runners for GitLab Dedicated: Now in limited availability"," Simplify CI/CD infrastructure 
management with hosted runners for GitLab Dedicated, a fully managed solution that handles all aspects of runner infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664751/Blog/Hero%20Images/AdobeStock_640077932.jpg","https://about.gitlab.com/blog/hosted-runners-for-gitlab-dedicated-now-in-limited-availability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Hosted runners for GitLab Dedicated: Now in limited availability\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2025-01-23\",\n      }",{"title":3062,"description":3063,"authors":3068,"heroImage":3064,"date":3069,"body":3070,"category":693,"tags":3071},[2192],"2025-01-23","We are excited to announce that hosted runners for [GitLab Dedicated](https://about.gitlab.com/dedicated/), our single-tenant SaaS solution, have transitioned from [beta](https://about.gitlab.com/blog/hosted-runners-for-gitlab-dedicated-available-in-beta/) to limited availability, marking a significant milestone in our commitment to simplifying CI/CD infrastructure management for our customers.\n\n## Streamlined CI/CD infrastructure management\n\nManaging runner infrastructure has traditionally been a complex undertaking, requiring dedicated resources and expertise to maintain optimal performance. Hosted runners for GitLab Dedicated eliminates these challenges by providing a fully managed solution that handles all aspects of runner infrastructure. This allows your teams to focus on what matters most – building and deploying great software.\n\n## Key benefits\n\n### Reduced operational overhead\n\nBy choosing hosted runners, you can eliminate the complexity of provisioning, maintaining, and securing your runner infrastructure. 
Our fully managed service handles all aspects of runner operations, from deployment to updates and security patches.\n\n### Automatic scaling\n\nHosted runners automatically scale to match your CI/CD demands, ensuring consistent performance during high-traffic periods and for large-scale projects. This dynamic scaling capability means you'll always have runners available to pick up your CI/CD jobs and ensure optimal efficiency of your development teams.\n\n### Cost optimization\n\nWith hosted runners, you only pay for the resources you actually use. This consumption-based model eliminates the need to maintain excess capacity for peak loads, potentially reducing your infrastructure costs while ensuring resources are available when needed.\n\n### Enterprise-grade security\n\nFollowing the same security principles as GitLab Dedicated, hosted runners provide complete isolation from other tenants and are secure by default. Jobs are executed in fully-isolated VMs with no inbound traffic allowed. This means you can maintain the highest security standards without the complexity of implementing and maintaining security measures yourself.\n\n## Introducing native Arm64 support\n\nOur hosted runners now include native Arm64 support in addition to our existing x86-64 runners, offering significant advantages for modern development workflows.\n\n### Enhanced performance for Arm-based development\n\nNative Arm64 runners enable you to build, test, and deploy Arm-based applications in their native environment, ensuring optimal performance and compatibility. Teams developing Docker images or services targeting Arm-based cloud platforms can see build times cut significantly, accelerating their development cycles and deployments.\n\n### Cost-efficient computing\n\nArm-based runners can significantly reduce your computing costs, due to their efficient processing architecture and lower cost per minute. 
For compatible jobs, this means more affordable pipeline execution.\n\n### Native building capabilities\n\nWith support for both x86-64 and Arm64 architectures, you can:\n- build and test applications natively on either architecture\n- create multi-architecture container images efficiently\n- validate cross-platform compatibility in your CI/CD pipeline\n- optimize your delivery pipeline for specific target platforms\n- eliminate the performance overhead of emulation when building for Arm targets\n\nThis dual-architecture support ensures you have the flexibility to choose the right environment for each specific workload while maintaining a consistent and efficient CI/CD experience across all your projects.\n\n## Available runner sizes\n\nWe're expanding our runner offerings to include both x86-64 and Arm64 architectures with a range of configurations. The following sizes are available:\n\n| Size | vCPUs | Memory | Storage |\n|------|--------|---------|----------|\n| Small    | 2      | 8 GB    | 30 GB    |\n| Medium    | 4      | 16 GB   | 50 GB    |\n| Large    | 8      | 32 GB   | 100 GB   |\n| X-Large   | 16     | 64 GB   | 200 GB   |\n| 2X-Large  | 32     | 128 GB  | 200 GB   |\n\nThis expanded size support allows you to optimize your CI/CD pipeline performance based on your application's specific requirements.\n\n## What's next for hosted runners\n\nWe plan to release hosted runners in general availability in May 2025. 
The release includes compute minute visualization to help you better understand and control your CI/CD usage across your organization.\n\nWe'll be expanding our hosted runners offering with several new features coming later this year:\n- Network controls for enhanced security and compliance\n- MacOS runners to support application development for the Apple ecosystem\n- Windows runners for .NET and Windows-specific workloads\n\nThese additions will provide even more flexibility and coverage for your CI/CD needs, allowing you to consolidate all your build and test workflows on GitLab Dedicated hosted runners.\n\nReady to simplify your CI/CD infrastructure? Contact your GitLab representative or [reach out to our sales team](https://about.gitlab.com/dedicated/) to learn more about hosted runners for GitLab Dedicated.\n",[696,495,695,1062,9,185],{"slug":3073,"featured":6,"template":700},"hosted-runners-for-gitlab-dedicated-now-in-limited-availability","content:en-us:blog:hosted-runners-for-gitlab-dedicated-now-in-limited-availability.yml","Hosted Runners For Gitlab Dedicated Now In Limited Availability","en-us/blog/hosted-runners-for-gitlab-dedicated-now-in-limited-availability.yml","en-us/blog/hosted-runners-for-gitlab-dedicated-now-in-limited-availability",{"_path":3079,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3080,"content":3086,"config":3092,"_id":3094,"_type":14,"title":3095,"_source":16,"_file":3096,"_stem":3097,"_extension":19},"/en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"title":3081,"description":3082,"ogTitle":3081,"ogDescription":3082,"noIndex":6,"ogImage":3083,"ogUrl":3084,"ogSiteName":685,"ogType":686,"canonicalUrls":3084,"schema":3085},"How to host VueJS apps using GitLab Pages","Follow this tutorial, including detailed configuration guidance, to quickly get your application up and running for 
free.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683489/Blog/Hero%20Images/hosting.png","https://about.gitlab.com/blog/hosting-vuejs-apps-using-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to host VueJS apps using GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-09-13\",\n      }",{"title":3081,"description":3082,"authors":3087,"heroImage":3083,"date":3089,"body":3090,"category":718,"tags":3091},[3088,1957],"Sophia Manicor","2023-09-13","\nIf you use VueJS to build websites, then you can host your website for free with GitLab Pages. This short tutorial walks you through a simple way to host and deploy your VueJS applications using GitLab CI/CD and GitLab Pages.\n\n## Prequisites\n- A VueJS application\n- Working knowledge of GitLab CI\n- 5 minutes\n\n## Setting up your VueJS application\n\n1) Install vue-cli.\n\n```bash\nnpm install -g @vue/cli\n# OR\nyarn global add @vue/cli\n```\nYou can check you have the right version of Vue with:\n\n```bash\nvue --version\n```\n\n2) Create your application using:\n\n```bash\nvue create name-of-app\n```\n\nWhen successfully completed, you will have a scaffolding of your VueJS application.\n\n## Setting up .gitlab-ci.yml for GitLab Pages\nBelow is the [GitLab CI configuration](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/.gitlab-ci.yml) necessary to deploy to GitLab Pages. Put this file into your root project. 
GitLab Pages always deploys your website from a specific folder called `public`.\n\n```yaml\nimage: \"node:16-alpine\"\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - yarn install --frozen-lockfile --check-files --non-interactive\n    - yarn build\n  artifacts:\n    paths:\n      - public\n\npages:\n  stage: deploy\n  script:\n    - echo 'Pages deployment job'\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n\n```\n\n## Vue config (vue.config.js)\nIn Vue, the artifacts are built in a folder called dist, in order for GitLab to deploy to Pages, we need to change the path of the artifacts. One way to do this is by changing the [Vue config file](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/vue.config.js), `vue.config.js`.\n\n```\nconst { defineConfig } = require('@vue/cli-service')\n\nfunction publicPath () {\n  if (process.env.CI_PAGES_URL) {\n    return new URL(process.env.CI_PAGES_URL).pathname\n  } else {\n    return '/'\n  }\n}\n\nmodule.exports = defineConfig({\n  transpileDependencies: true,\n  publicPath: publicPath(),\n  outputDir: 'public'\n})\n```\n\nHere we have set `outputDir` to `public` so that GitLab will pick up the build artifacts and deploy to Pages. Another important piece when creating this configuration file is to change the `publicPath`, which is the base URL your application will be deployed at. In this case, we have create a function `publicPath()` that checks if the CI_PAGES_URL environment variable is set and returns the correct base URL.\n\n## Run GitLab CI\n\n![vuejs-gitlab-pages-pipeline](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/vuejs-gitlab-pages-pipeline.png){: .shadow}\n\n\n## Check Pages to get your URL\n\n![gitlab-pages-domain](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/gitlab-page-domain.png){: .shadow}\n\nVoila! 
You have set up a VueJS project with a fully functioning CI/CD pipeline. Enjoy your VueJS application hosted by GitLab Pages!\n\n## References\n- [https://cli.vuejs.org/guide/installation.html](https://cli.vuejs.org/guide/installation.html)\n- [https://cli.vuejs.org/guide/creating-a-project.html](https://cli.vuejs.org/guide/creating-a-project.html)\n- [https://gitlab.com/demos/applications/vuejs-gitlab-pages](https://gitlab.com/demos/applications/vuejs-gitlab-pages)\n\n",[9,917,785,786],{"slug":3093,"featured":6,"template":700},"hosting-vuejs-apps-using-gitlab-pages","content:en-us:blog:hosting-vuejs-apps-using-gitlab-pages.yml","Hosting Vuejs Apps Using Gitlab Pages","en-us/blog/hosting-vuejs-apps-using-gitlab-pages.yml","en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"_path":3099,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3100,"content":3106,"config":3111,"_id":3113,"_type":14,"title":3114,"_source":16,"_file":3115,"_stem":3116,"_extension":19},"/en-us/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms",{"title":3101,"description":3102,"ogTitle":3101,"ogDescription":3102,"noIndex":6,"ogImage":3103,"ogUrl":3104,"ogSiteName":685,"ogType":686,"canonicalUrls":3104,"schema":3105},"How Carrefour and Thales are evolving their CI/CD platforms","Learn how a large retailer and an aerospace and defense company are using GitLab to evolve their CI/CD platforms to increase developer productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662061/Blog/Hero%20Images/cicdcover.png","https://about.gitlab.com/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Carrefour and Thales are evolving their CI/CD platforms\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristina Weis\"}],\n        \"datePublished\": \"2024-02-05\"\n      
}",{"title":3101,"description":3102,"authors":3107,"heroImage":3103,"date":3108,"body":3109,"category":1288,"tags":3110},[1265],"2024-02-05","An efficient and secure CI/CD platform can be especially critical for some organizations — say, a large multinational retailer or an aerospace and defense company. Fortunately, Samuel Le Garec, a technical architect from Carrefour, and Jordan Dubié, Chief Product Owner for the software engineering environment at Thales, joined GitLab for a roundtable discussion at our [DevSecOps World Tour](https://about.gitlab.com/events/devsecops-world-tour/) stop in Paris. They shared their experiences and lessons learned from building CI/CD platforms for thousands of developers, and improving productivity and security along the way.\n\nHere are some of the highlights from their conversation.\n\n**Can you tell me about yourselves and your team?**\n\nDubié: I am the Chief Product Owner of the software factory, which is the software development environment of the Thales Group. [Thales](https://www.thalesgroup.com/en) is a French industrial group that operates in the aerospace, defense, security, and digital identity sectors. Thales is 80,000 people, including 30,000 engineers, specifically 15,000 software engineers. \n\nToday, our team is a little less than 50 people. We also do the build and the run and have a part, therefore, in 24-7 support, following the sun. We have different instances, and we have domains of different sensitivity. So we have several platforms, all self-hosted. On each of these platforms, we deploy the entire environment of the software factory. We also run a part of it, and then we are supported by other teams who take over these activities.\n\nLe Garec: I am an architect at [Carrefour](https://www.carrefour.com/en). I’m responsible for the engineering platform within the software factory and implementing the CI/CD platforms for the development teams. 
We have between 1,000 and 1,500 developers, so they commit like crazy. Today, I have a team of about 15 people. Our primary focus is to evolve the CI/CD platform, operate it, and manage incidents. We do that 24-7 today. We work mainly for the IT department in France. Then, I have other people who set up all the CI/CD templating and blueprints, the goal being to speed up the project as much as possible.\n\n**Can you share some numbers to give us a sense for the scale of GitLab in your organization?**\n\nDubié: Since we’ve been using GitLab, we’re at 30,000 projects, 70,000 issues, and 280,000 merge requests.\n\nLe Garec: At Carrefour, I think we have less GitLab experience than Jordan since we got our GitLab subscription at the beginning of the year. We have over 10,000 projects today on our CI/CD platform and then many pipeline executions, builds, and deployments that are as automated as possible. And we have 100,000 commits per month, to give you an idea of our activity.\n\n**What was it like before GitLab?**\n\nDubié: It was a very heterogeneous environment. We had all these different business entities, each with their own IT teams that were on site. I was in Toulouse, but there was an entity in Bordeaux, as well. The different environments in Bordeaux, Toulouse, Valence, and Paris were completely separate platforms. That was a major obstacle to collaboration in the group. We had all these heterogeneous platforms with tools that, most of the time, hadn’t been updated in three to four years. So, in terms of functionality, we were far behind the market's state of the art.\n\nLe Garec: We were deploying many different tools. We realized that maintaining our platform was becoming more and more complicated as the maturity of the development teams increased. They always asked us for more tools, and we reached a point where we couldn’t afford to do it anymore. That’s when we thought about an all-in-one solution. 
And that’s when we chose GitLab.\n\n**Is GitLab new for you? What are your initial goals?**\n\nLe Garec: GitLab is new for us — we started using it in April of this year. We are using GitLab Ultimate SaaS. Our goal is to use as many GitLab Ultimate features as possible. Our plan for the first two or three years is to migrate our source code to GitLab. So far, we’ve managed to move off of Bitbucket. Our next goal is to get rid of Jenkins and move to GitLab CI.\n\n> Learn [how to migrate from Jenkins to GitLab](https://about.gitlab.com/blog/jenkins-to-gitlab-migration-made-easy/).\n\n**How has GitLab helped to improve the speed and quality of your delivery processes?**\n\nDubié: We bet everything on automation. We go all out on everything that can support CI/CD so that developers can get feedback as quickly as possible. We also use SAST, DAST, secret detection, and software composition analysis.\n\nOne thing that was important for us was autonomy. We try to give our developers as much autonomy as possible so that people can create their own group projects. It seems very simple, but in the previous platform with Bitbucket, you had to go through tickets to have that; it was not in the hands of the developers. Today, there is much more autonomy, which allows them to go faster.\n\nWe also benefit from common, shared features; the shared runners, for example, are something we appreciate. We are already working on them because they will add a lot of value for developers who today have difficulties having a development environment that meets their expectations.\n\n**How would you describe an ideal software delivery process with GitLab?**\n\nLe Garec: For me, an almost ideal delivery process is full automation with everything automated — the build, the test, the releases, the versioning, and the deployment, using deployment techniques such as canary deployments and feature flags. 
For me, that’s something we want and need to go to, at Carrefour at least.\n\n**What are the reactions of the users or developers using GitLab?**\n\nLe Garec: They are pleased. One of the reasons we went to GitLab was the lack of features we had on Bitbucket. Bitbucket had some features, but every time you have to add plugins, it’s paid every time. It’s annoying. So that’s why we went to GitLab and the whole set of features that GitLab offers. It pleases the users to have a homogeneous platform where they don’t have to change tools every five minutes. It improves their productivity, and we have fewer incidents.\n\n*Editor's note: This blog post is based on an edited version of the session transcript that was translated from French.*\n",[9,720,696],{"slug":3112,"featured":91,"template":700},"how-carrefour-and-thales-are-evolving-their-ci-cd-platforms","content:en-us:blog:how-carrefour-and-thales-are-evolving-their-ci-cd-platforms.yml","How Carrefour And Thales Are Evolving Their Ci Cd Platforms","en-us/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms.yml","en-us/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms",{"_path":3118,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3119,"content":3125,"config":3130,"_id":3132,"_type":14,"title":3133,"_source":16,"_file":3134,"_stem":3135,"_extension":19},"/en-us/blog/how-cube-uses-gitlab-to-increase-efficiency-and-productivity",{"title":3120,"description":3121,"ogTitle":3120,"ogDescription":3121,"noIndex":6,"ogImage":3122,"ogUrl":3123,"ogSiteName":685,"ogType":686,"canonicalUrls":3123,"schema":3124},"Cube reduces toolchain complexity and speeds software delivery with GitLab","Software maker shares how the DevSecOps Platform helps improve customer collaboration and streamline 
releases.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/how-cube-uses-gitlab-to-increase-efficiency-and-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Cube reduces toolchain complexity and speeds software delivery with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2023-02-07\"\n      }",{"title":3120,"description":3121,"authors":3126,"heroImage":3122,"date":3127,"body":3128,"category":1288,"tags":3129},[1081],"2023-02-07","Six years ago, [Cube](https://cube.nl/), a software development company based in the Netherlands, struggled with a fragmented and siloed toolchain. The company also wanted a comprehensive solution that would pull together development, security, and operations teams and create a single source of truth for better collaboration with their customers. 
So they opted for GitLab’s DevSecOps Platform.\n\nOperations Manager Mans Booijink and Lead Developer Remi Buijvoets sat down with GitLab to share how moving to an all-in-one DevSecOps platform has made a significant and positive difference at Cube – internally and with customers – and why they are happy with the move.\n\nBefore diving into their migration story, here's a snapshot of the improvements Cube has achieved with GitLab:\n\n- Improved [CI/CD](/topics/ci-cd/) delivery speed\t\n- Improved service-level agreement (SLA) response time\n- 80% of Cube clients are actively working in GitLab\n- Single source of truth collaboration\n\n![image of Mans Booijink](https://about.gitlab.com/images/blogimages/MansBooijinkCube.jpg) | ![image of Remi Buijvoets](https://about.gitlab.com/images/blogimages/RemiBuijvoetsCube.jpg)\nMans Booijink | Remi Buijvoets\n\n**Why did Cube make the switch to GitLab?**\n\nBefore making the switch, we used a combination of Bitbucket, Trello, and Jira. The combination caused a [fragmented, siloed toolchain](/blog/battling-toolchain-technical-debt/). There were a lot of opportunities for improving efficiency. So we started using GitLab because we wanted a DevOps system that includes ticket management, security, and customer collaboration. Not only does GitLab operate efficiently, but adoption is also fast and easy. In fact, we tripled our GitLab user base from 20 to nearly 70 in the past three years. Now almost the entire Cube organization uses GitLab. Our clients and colleagues (designers, developers, and project managers) can communicate easily by working in the same GitLab environment. \n\n**Did Cube try anything else before GitLab?**\n\nWe tried out other tools before moving to GitLab. But GitLab offered everything we needed – ticket management, CI/CD, DevOps, versioning, file management, and security. 
It didn’t take long to realize that GitLab was perfect because we wanted to have one tool where we all work together with our clients and team.\n\n**Now that Cube uses GitLab, how has your toolchain changed?**\n\nGitLab has simplified everything into a single source of truth. We deploy to a private cloud in the Netherlands, and we integrated Sentry into GitLab for error monitoring. Still, otherwise, everything we do is in GitLab.\n\n**How has GitLab CI/CD helped Cube?**\n\nGitLab CI helps us automate the software development process by using GitLab pipelines and a runner to deploy our code. Within the pipelines, we run linters to check code quality. Also, Unit and functional tests are executed to ensure the functioning of the application. When an error occurs or when a test fails, the pipeline will fail. GitLab CI adds value to our software development lifecycle because it prevents bugs from being deployed and helps deploy features while other features are still in development. In addition, it helps us monitor our projects’ quality, which is essential.\n\nGitLab CI surely helps in the speed of delivery, but also a lot in facilitating rollbacks when a release accidentally contains an error. We use a release schedule, and GitLab helps us to automate most of the work so that we can deliver right on time.\n\nGitLab CD improves the speed of our code deployments. When the pipelines succeed, developers can ensure everything works as it should. As a result, developers can focus more on the development itself and don’t have to lose focus by giving thoughts on the deployment. We have also introduced a local machine that builds releases. The machine is hosted on-premises and has a lot of resources. Multiple GitLab Runners use this machine to build releases. 
Using the GitLab Runners with an on-premises computer was easy to configure.\n\n**How have GitLab’s Agile capabilities helped your company become more efficient?**\n\nBefore GitLab, we used a variety of communication channels with our customers. Now GitLab gives us a central, accessible place for all communications. We also use [GitLab for Agile development](/solutions/agile-delivery/). We have our epics to manage bigger development projects, and we also utilize milestones. We do four to six releases weekly, and we all manage them by creating milestones that everyone can see, including the client and the development team. We provide all tickets with requirements, estimated time, when we intend to release it, who has to test it, etc. We use GitLab for 90% of our project management, Agile management, and working functionalities, which is proving very effective.\n\n**How do you measure the success of your GitLab migration?**\n\nWe measure the lead time of delivery. The results of each separate phase in the delivery are accurately logged. This way we can demonstrate that we comply with the agreements made to our customers. 
We also keep track of how efficient the different phases during the development process are, and whether they improve compared to the past.\n\nBecause we use GitLab integrally throughout the process, it is a very valuable and useful source of information.",[696,720,9],{"slug":3131,"featured":6,"template":700},"how-cube-uses-gitlab-to-increase-efficiency-and-productivity","content:en-us:blog:how-cube-uses-gitlab-to-increase-efficiency-and-productivity.yml","How Cube Uses Gitlab To Increase Efficiency And Productivity","en-us/blog/how-cube-uses-gitlab-to-increase-efficiency-and-productivity.yml","en-us/blog/how-cube-uses-gitlab-to-increase-efficiency-and-productivity",{"_path":3137,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3138,"content":3144,"config":3150,"_id":3152,"_type":14,"title":3153,"_source":16,"_file":3154,"_stem":3155,"_extension":19},"/en-us/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance",{"title":3139,"description":3140,"ogTitle":3139,"ogDescription":3140,"noIndex":6,"ogImage":3141,"ogUrl":3142,"ogSiteName":685,"ogType":686,"canonicalUrls":3142,"schema":3143},"How GitLab supports NSA and CISA CI/CD security guidance","GitLab can support your alignment with NSA and CISA CI/CD recommendations and best practices for cloud-based DevSecOps environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683032/Blog/Hero%20Images/vaultimage.png","https://about.gitlab.com/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab supports NSA and CISA CI/CD security guidance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joseph Longo\"}],\n        \"datePublished\": \"2023-09-19\"\n      }",{"title":3139,"description":3140,"authors":3145,"heroImage":3141,"date":3147,"body":3148,"category":697,"tags":3149},[3146],"Joseph Longo","2023-09-19","\nIn June, the 
National Security Agency (NSA) and the Cybersecurity and Infrastructure Security Agency (CISA) [issued a joint cybersecurity information sheet (CSI)](https://media.defense.gov/2023/Jun/28/2003249466/-1/-1/0/CSI_DEFENDING_CI_CD_ENVIRONMENTS.PDF) providing recommendations and best practices for cloud-based DevSecOps environments. Specifically, the CSI focuses on security hardening best practices for continuous integration/continuous delivery (CI/CD) cloud deployments.\n\nLet's take a look at the relevant threats, recommended countermeasures, and how the [GitLab DevSecOps Platform](https://about.gitlab.com/platform/) can support the implementation and enforcement of the countermeasures to help secure your CI/CD environment.\n\n## CI/CD environments are under threat\nOver the past few years, the software supply chain, and specifically CI/CD environments, have become a persistent and valuable target for malicious actors. Theft of proprietary code and data, injection of malicious links and redirects, and denial-of-service attacks are a few examples of why CI/CD environments have been such lucrative targets for threat actors.\n\nThe CSI outlines examples of common risks in CI/CD pipelines. 
These risks include:\n* insecure first-party code\n* insecure third-party code\n* poisoned pipeline execution\n* insufficient pipeline access controls\n* insecure system configuration\n* usage of insecure third-party services\n* exposure of secrets\n\nAdditional context can be found in the CSI and in [OWASP's top 10 CI/CD security risks](https://owasp.org/www-project-top-10-ci-cd-security-risks/).\n\nNote: The CSI contains helpful information on potential threat scenarios and illustrations to help visualize different attack vectors.\n\n## Hardening recommendations for CI/CD environment\nAs a single, all-inclusive DevSecOps platform, GitLab's features support the implementation of the recommended mitigations from the NSA and CISA.\n\n### Authentication and access mitigation\nHere are the features that align with authentication and access mitigation.\n\n#### Use NSA-recommended cryptography\n_\"NSA and CISA recommend the implementation and configuration of strong cryptographic algorithms when configuring cloud applications and services.\"_\n\nGitLab's [GitLab.com](https://about.gitlab.com/solutions/) and [GitLab Dedicated](https://about.gitlab.com/dedicated/) SaaS solutions implement TLS 1.2+ for encrypting data in transit and AES-256-bit encryption for data at rest. You can learn more about our approach to cryptography in our [Cryptography Standard](https://about.gitlab.com/handbook/security/cryptographic-standard.html).\n\n#### Minimize the use of long-term credentials\n_\"Use strong credentials that are resistant to stealing, phishing, guessing, and replaying wherever and whenever possible.\"_\n\nTo support the use of strong credentials, GitLab enables you to centralize authentication and authorization responsibilities for your GitLab instance through [SAML SSO](https://docs.gitlab.com/ee/user/group/saml_sso/) integrations. GitLab integrates with a wide range of identity providers to support our customers’ diverse tech stacks. 
GitLab also supports the System for Cross-Domain Identity Management ([SCIM](https://docs.gitlab.com/ee/user/group/saml_sso/scim_setup.html)). Through GitLab’s SSO and SCIM integrations, you can automate the lifecycle of your user identities in a secure and efficient manner.\n\n[SSO](https://docs.gitlab.com/ee/integration/saml.html) and [SCIM](https://docs.gitlab.com/ee/administration/settings/scim_setup.html) are also available for GitLab self-managed customers.\n\nGitLab supports [two-factor authentication](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html). Customers can enable one or both of the following second factors of authentication:\n\n* time-based one-time passwords ([TOTP](https://datatracker.ietf.org/doc/html/rfc6238))\n* WebAuthn devices\n\n> Check out our [Ultimate guide to enabling SAML and SSO on GitLab.com](https://about.gitlab.com/blog/the-ultimate-guide-to-enabling-saml/) for more information.\n\n#### Add signature to CI/CD configuration and verify it\n_\"NSA and CISA recommend implementing secure code signing to establish digital trust\nwithin the CI/CD pipeline.\"_\n\nGitLab enables its customers to [sign commits](https://docs.gitlab.com/ee/user/project/repository/signed_commits/) using:\n* an [SSH key](https://docs.gitlab.com/ee/user/project/repository/signed_commits/ssh.html)\n* a [GPG key](https://docs.gitlab.com/ee/user/project/repository/signed_commits/gpg.html)\n* a [personal x.509 certificate](https://docs.gitlab.com/ee/user/project/repository/signed_commits/x509.html)\n\nGitLab's [push rules](https://docs.gitlab.com/ee/user/project/repository/push_rules.html) feature can also be used to reject individual commits if they are not signed with GPG, or you can choose to reject all commits from unverified users.\n\n![Signed commits](https://about.gitlab.com/images/blogimages/2023-09-07-how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/signed-commits.png)\n\nSigned commits verified and unverified 
badges\n{: .note.text-center}\n\n#### Utilize two-person rules (2PR) for all code updates\n_\"No single developer should be able to check in code without another developer\nreviewing and approving the changes.\"_\n\nGitLab enables users to configure their [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) (MRs) so that they must be approved before they can be merged. MR approvals allow users to set the minimum number of required approvals before work can merge into a project. Some examples of rules you can create include:\n* Users with specific permissions can always approve work.\n* [Code owners](https://docs.gitlab.com/ee/user/project/codeowners/index.html) can approve work for files they own.\n* Users with specific permissions can approve work, [even if they don’t have merge rights](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#merge-request-approval-segregation-of-duties) to the repository.\n* Users with specific permissions can be allowed or denied the ability to [override approval rules on a specific MR](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html#edit-or-override-merge-request-approval-rules).\n\nGitLab's MR approval [rules](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html) and [settings](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/settings.html) can be configured and adapted to meet your organization's requirements and align with your risk tolerance.\n\n![MR approval settings](https://about.gitlab.com/images/blogimages/2023-09-07-how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/mr-approval-settings.png)\n\nExample of MR approval requirements\n{: .note.text-center}\n\n#### Implement least-privilege policies for CI/CD access\n_\"The CI/CD pipeline should not be accessible by everyone in the organization.\" \n\"Mitigate password risks by implementing multi-factor authentication (MFA).\"_\n\nGitLab enables you 
to [assign users a role](https://docs.gitlab.com/ee/user/permissions.html) when you add them to a project or group. A user’s role determines the actions they can take within your GitLab instance. The following roles are available for assignment:\n* Guest (private and internal projects only)\n* Reporter\n* Developer\n* Maintainer\n* Owner\n* Minimal access (available for the top-level group only)\n\nGitLab's role-based access control (RBAC) model enables you to limit a user’s permissions in accordance with the [principle of least privilege](https://csrc.nist.gov/glossary/term/least_privilege) and your business and information security requirements.\n\nAs mentioned [above](#minimize-the-use-of-long-term-credentials), GitLab supports two-factor authentication and can integrate with several SSO providers to support your tech stack and help you centralize authentication and authorization responsibilities.\n\n#### Secure user accounts\n_\"Regularly audit administrative user accounts and configure access controls under the\nprinciples of least privilege and separation of duties. Audit logs to ensure new accounts\nare legitimate.\"_\n\nAs mentioned in the [previous section](#implement-least-privilege-policies-for-cicd-access), GitLab enables you to assign roles and associated permissions to your users in a way that aligns with your business and information security requirements. 
GitLab's authorization feature enables you to support the principle of least privilege and the concept of separation of duties.\n\nKeep reading to understand how GitLab supports the NSA and CISA's audit log guidance.\n\n#### Secure secrets\n_\"Secure handling of secrets, tokens, and other credentials is crucial in a CI/CD pipeline.\"_\n\nGitLab's [secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) enables users to scan their repositories for exposed secrets and take action based on the scan results.\n\nWith secret detection, users can see scan results in multiple places such as GitLab's [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/index.html) and [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), and users can configure [automatic responses to leaked secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html).\n\n### Development process mitigations\nHere are features that support development process mitigations.\n\n#### Integrate security scanning as part of the CI/CD pipeline\n_\"Include security scanning early in the CI/CD process.\"_\n\nThe CSI recommends the implementation of the following tools:\n* static application security testing (SAST)\n* registry scanning\n* dynamic analysis security testing\n\nGitLab supports these recommendations through its [SAST](https://docs.gitlab.com/ee/user/application_security/sast/), [dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/), [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/), and [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) features. 
GitLab also offers additional scanning features such as [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) and [dynamic API security testing (DAST API)](https://docs.gitlab.com/ee/user/application_security/dast_api/).\n\nTogether, these [Secure stage](https://about.gitlab.com/features/?stage=secure) features provide comprehensive coverage to help you write secure code faster.\n\n#### Restrict untrusted libraries and tools\n_\"Only use software, tools, libraries, and artifacts from secure and trusted sources.\"_\n\nIn addition to [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), GitLab's [license compliance](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html) feature enables organizations to incorporate trusted dependencies into their codebase that meet their unique business and security requirements.\n\nWith license compliance, you can check that your dependencies' licenses are compatible with your business and security requirements, and you can approve or deny dependencies based on configured license approval policies.\n\nNote: License compliance is only available for GitLab Ultimate users.\n\n#### Analyze committed code\n_\"Securing the CI/CD pipeline involves analyzing the code that is being committed, which can be achieved manually or by using automated tools.\"_\n\nAs an all-inclusive DevSecOps platform, GitLab supports a seamless and comprehensive approach to reviewing code changes.\n\nWith the scanning features mentioned [above](#integrate-security-scanning-as-part-of-the-cicd-pipeline), you can enable automated code reviews to help identify vulnerabilities, logic flaws, and policy violations.\n\nGitLab's [MR review](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/) feature streamlines the manual code review process. 
[Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers) makes it easy to identify users who are authorized to review and merge your changes.\n\n![Suggested Reviewers](https://about.gitlab.com/images/blogimages/2023-09-07-how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/suggested-reviewers.png){: .shadow.small.center}\n\nSuggested Reviewers\n{: .note.text-center}\n\nMR approval [rules](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html) and [settings](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/settings.html) help ensure your code review requirements are enforced in a programmatic way.\n\n#### Remove any temporary resources\n_\"A CI/CD pipeline may also create temporary resources, such as virtual machines or Kubernetes clusters, to run tests. While test environments are usually always live, these temporary resources are meant to be created for a single test purpose and must be destroyed after the pipeline run.\"_\n\nWithin GitLab, a temporary runner VM hosts and runs each CI job. GitLab automatically issues a command to remove the temporary runner VM immediately after the CI job completes. Additional details on this process can be found in our documentation for [Security for SaaS runners](https://docs.gitlab.com/ee/ci/runners/#security-for-saas-runners).\n\n#### Keep audit logs\n_\"An audit log should provide clear information on who committed, reviewed, and deployed what, when, and where.\"_\n\nAs outlined in this [blog post](https://about.gitlab.com/blog/how-gitlab-can-support-your-iso-compliance-journey/), GitLab enables you to use [audit events](https://docs.gitlab.com/ee/administration/audit_events.html) to track important events, including who performed the related action and when. 
Audit events cover a broad range of categories, including:\n* group management\n* authentication and authorization\n* user management\n* compliance and security\n* CI/CD\n* GitLab Runners\n\n![Audit events](https://about.gitlab.com/images/blogimages/2023-08-24-how-gitlab-can-support-your-iso-compliance-journey/example-of-an-audit-event.png)\n\nExample of an audit event\n{: .note.text-center}\n\nFor [Ultimate](https://about.gitlab.com/pricing/ultimate/) customers, [audit event streaming](https://docs.gitlab.com/ee/administration/audit_event_streaming/index.html) can be enabled. Audit event streaming enables users to set a streaming destination for a top-level group or instance to receive all audit events about the group, subgroups, and projects, as structured JSON.\n\n#### Implement an SBOM and SCA \n_\"A software bill of materials (SBOM) and software composition analysis (SCA) can play a useful role in the software development lifecycle (SDLC) and in DevSecOps by helping to track all third-party and open source components in the codebase.\"_\n\nGitLab's [dependency list](https://docs.gitlab.com/ee/user/application_security/dependency_list/) feature enables you to review your project or group’s dependencies, including their known vulnerabilities. 
\n\nCombining GitLab's dependency list feature with its [SCA](#restrict-untrusted-libraries-and-tools) suite of features supports a comprehensive strategy for identifying and remediating vulnerabilities and risks within your supply chain.\n\n![Dependency List](https://about.gitlab.com/images/blogimages/2023-09-07-how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/dependency-list.png)\n\nExample of dependency list results\n{: .note.text-center}\n\nNote: Dependency list is only available for GitLab Ultimate users.\n\n#### Plan, build, and test for resiliency\n_\"Build the pipeline for high availability, and test for disaster recovery periodically.\"_\n\nAs a SaaS provider, GitLab prioritizes your resiliency and efficiency needs. We maintain robust [business continuity](https://about.gitlab.com/handbook/business-technology/gitlab-business-continuity-plan/) and [disaster recovery](https://gitlab.com/gitlab-com/gl-infra/readiness/-/blob/master/library/disaster-recovery/index.md) strategies to support the availability of the GitLab platform, and we provide helpful strategies for GitLab users to maintain [pipeline efficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html).\n\nIf you'd like to learn more about what we're doing to maintain the security, confidentiality, and availability of the GitLab platform, please request our [Customer Assurance Package](https://about.gitlab.com/security/cap/).\n\n## Learn more\nAs a comprehensive DevSecOps platform, GitLab supports a broad range of requirements and recommendations. CI/CD environments have become lucrative targets for malicious actors, and the CSI provides excellent guidance for protecting such a critical component of an organization's assets. As a strategic partner, GitLab supports your efforts to safeguard your CI/CD environment and enables you to develop secure software faster. 
\n\nTo learn more about these features, have a look at our library of [tutorials](https://docs.gitlab.com/ee/tutorials/).\n",[697,695,720,9],{"slug":3151,"featured":6,"template":700},"how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance","content:en-us:blog:how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance.yml","How Gitlab Supports The Nsa And Cisa Cicd Security Guidance","en-us/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance.yml","en-us/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance",{"_path":3157,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3158,"content":3164,"config":3170,"_id":3172,"_type":14,"title":3173,"_source":16,"_file":3174,"_stem":3175,"_extension":19},"/en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab",{"title":3159,"description":3160,"ogTitle":3159,"ogDescription":3160,"noIndex":6,"ogImage":3161,"ogUrl":3162,"ogSiteName":685,"ogType":686,"canonicalUrls":3162,"schema":3163},"How Indeed transformed its CI platform with GitLab","The world's #1 job site migrated thousands of projects to GitLab CI, boosting productivity and cutting costs. 
Learn the benefits they realized, including a 79% increase in daily pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099351/Blog/Hero%20Images/Blog/Hero%20Images/Indeed-blog-cover-image-2_4AgA1DkWLtHwBlFGvMffbC_1750099350771.png","https://about.gitlab.com/blog/how-indeed-transformed-its-ci-platform-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Indeed transformed its CI platform with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Carl Myers\"}],\n        \"datePublished\": \"2024-08-27\",\n      }",{"title":3159,"description":3160,"authors":3165,"heroImage":3161,"date":3167,"body":3168,"category":1288,"tags":3169},[3166],"Carl Myers","2024-08-27","***Editor's note: From time to time, we invite members of our customer community to contribute to the GitLab Blog. Thanks to Carl Myers, Manager of CI Platforms at Indeed, for sharing your experience with GitLab.***\n\nHere at Indeed, our mission is to help people get jobs. Indeed is the [#1 job site](https://www.indeed.com/about?isid=press_us&ikw=press_us_press%2Freleases%2Faward-winning-actress-viola-davis-to-keynote-indeed-futureworks-2023_textlink_https%3A%2F%2Fwww.indeed.com%2Fabout) in the world with more than 350 million unique visitors every month.\n\nFor Indeed's Engineering Platform teams, we have a slightly different motto: \"We help people to help people get jobs.\" As part of a data-driven engineering culture that has spent the better part of two decades always putting the job seeker first, we are responsible for building the tools that not only make this possible, but empower engineers to deliver positive outcomes to job seekers every day.\n\nGitLab Continuous Integration has allowed Indeed’s CI Platform team of just 11 people to effectively support thousands of users across the company. 
Other benefits Indeed has realized by moving to GitLab CI include:\n- 79% increase in daily pipelines\n- 10-20% lower CI hardware costs\n- Decreased support burden\n\n## Evolving our CI platform: From Jenkins to a scalable solution\n\nLike many large technology companies, we built our CI platform organically as the company scaled, using the de facto open source and industry standard solutions available at the time. Back in 2007, when Indeed had fewer than 20 engineers, we were using Hudson, Jenkins’ direct predecessor.\n\nToday, through nearly two decades of growth, we have thousands of engineers. As new technology became available, we made incremental improvements, switching to Jenkins around 2011. Another improvement allowed us to move most of our workloads to dynamic cloud worker nodes using [AWS EC2](https://aws.amazon.com/ec2/). As we entered the Kubernetes age, however, the system architecture reached its limits.\n\nJenkins’ architecture was not created with the cloud in mind. Jenkins operates by having a \"controller\" node, a single point of failure that runs critical parts of a pipeline and farms out certain steps to worker nodes (which can scale horizontally to some extent). Controllers are also a manual scaling axis.\n\nIf you have too many jobs to fit on one controller, you must partition your jobs across controllers manually. CloudBees offers ways to mitigate this, including the CloudBees Jenkins Operations Center, which allows you to manage your constellation of controllers from a single centralized place. However, controllers remain challenging to run in a Kubernetes environment because each controller is a fragile single point of failure. Activities like node rollouts or hardware failures cause downtime.\n\nIn addition to the technical limitations baked into Jenkins itself, our CI platform also had several problems of our own making. For example, we used the Groovy Jenkins DSL to generate jobs from code in each repository. 
This led to each project having its own copy-pasted job pipeline, resulting in hundreds of versions that were hard to maintain and update. While Indeed’s engineering culture values flexibility and allows teams to operate in separate repositories, this flexibility became a burden as teams spent too much time addressing regular maintenance requests.\n\nRecognizing our technical debt, we turned to the [Golden Path pattern](https://tag-app-delivery.cncf.io/whitepapers/platforms/), which allows flexibility while providing a default route to simplify updates and encourage consistent practices across projects.\n\nThe CI Platform team at Indeed is not very large. Our team of around 11 engineers supports thousands of users, fielding support requests, performing upgrades and maintenance, and enabling always-on support for our global company.\n\nBecause our team not only supports our GitLab instance but also the entire CI platform, including the artifact server, our shared build code, and multiple other custom components of our platform, we had our work cut out for us. We needed a plan that would help us address our challenges while making the most efficient use of our existing resources.\n\n## Moving to GitLab CI\n\nAfter a careful design review with key stakeholders, we decided to migrate the entire company from Jenkins to GitLab CI. 
The primary reasons for choosing GitLab CI were:\n- We were already using GitLab for source code management.\n- GitLab is a complete offering that provides everything we need for CI.\n- GitLab CI is designed for scalability and the cloud.\n- GitLab CI enables us to write templates that extend other templates, which is compatible with our golden path strategy.\n- GitLab is open source software and the GitLab team has always been supportive in helping us submit fixes, giving us extra flexibility and reassurance.\n\nBy the time we officially announced that the GitLab CI Platform would be generally available to users, we already had 23% of all builds happening in GitLab CI from a combination of grassroots efforts and early adopters.\n\nThe challenge of the migration, however, would be the long tail. Due to the number of custom builds in Jenkins, an automated migration tool would not work for the majority of teams. Most of the benefits of the new system would not come until the old system was at 0%. Only then could we turn off the hardware and save the CloudBees license fee.\n\n## Feature parity and the benefits of starting over\n\nThough we support many different technologies at Indeed, the three most common languages are Java, Python, and JavaScript. These language stacks are used to make libraries, deployables (web services or applications), and cron jobs (a process that runs at regular intervals, for example, to build a data set in our data lake). Each of these formed a matrix of project types (Java Library, Python Cronjob, JavaScript Webapp, etc.) for which we had a skeleton in Jenkins. 
Therefore, we had to produce a golden path template in GitLab CI for each of these project types.\n\nMost users could use these recommended paths without change, but for those who did require customization, the golden path would still be a valuable starting point and enable them to change only what they needed, while still benefiting from centralized template updates in the future.\n\nWe quickly realized that most users, even those with customizations, were happy to take the golden path and at least try it. If they missed their customizations, they could always add them later. This was a surprising result! We thought that teams who had invested in significant customization would be loath to give them up, but in the majority of cases teams just didn't care about them anymore. This allowed us to migrate many projects very quickly — we could just drop the golden path (a small file about 6 lines long with includes) into their project, and they could take it from there.\n\n## InnerSource to the rescue\n\nThe CI Platform team also adopted a policy of \"external contributions first\" to encourage everyone in the company to participate. This is sometimes called InnerSource. We wrote tests and documentation to enable external contributions — contributions from outside our immediate team — so teams that wanted to write customizations could instead include them in the golden path behind a feature flag. This let them share their work with others and ensure we didn't break them moving forward (because they became part of our codebase, not theirs).\n\nThis also had the benefit that particular teams who were blocked waiting for a feature they needed were empowered to work on the feature themselves. We could say \"we plan to implement the feature in a few weeks, but if you need it earlier than that we are happy to accept a contribution.\" In the end, many core features necessary for parity were developed in this manner, more quickly and better than our team had resources to do it. 
The migration would not have been a success without this model.\n\n## Ahead of schedule and under budget\n\nOur CloudBees license expired on April 1, 2024. This gave us an aggressive target to achieve the full migration. This was particularly ambitious considering that at the time, 80% of all builds (60% of all projects) still used Jenkins for their CI. This meant over 2,000 [Jenkinsfiles](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) would still need to be rewritten or replaced with our golden path templates.\n\nTo achieve this target, we made documentation and examples available, implemented features where possible, and helped our users contribute features where they were able.\n\nWe started regular office hours, where anyone could come and ask questions or seek our help to migrate. We additionally prioritized support questions relating to migration ahead of almost everything else. Our team became GitLab CI experts and shared that expertise inside our team and across the organization.\n\nAutomatic migration for most projects was not possible, but we discovered it could work for a small subset of projects where customization was rare. We created a Sourcegraph batch change campaign to submit merge requests to migrate hundreds of projects, and poked and prodded our users to accept these MRs.\n\nWe took success stories from our users and shared them widely. As users contributed new features to our golden paths, we advertised that these features \"came free\" when you migrated to GitLab CI. Some examples included built-in security and compliance scanning, Slack notifications for CI builds, and integrations with other internal systems.\n\nWe also conducted a campaign of aggressive \"scream tests.\" We automatically disabled Jenkins jobs that hadn't run or succeeded in a while, and told users that if they needed them, they could turn them back on. This was a low-friction way to identify which jobs were actually needed. 
We had thousands of jobs that hadn't been run a single time since our last CI migration (which was Jenkins to Jenkins). This told us we could safely ignore almost all of them.\n\nIn January 2024, we nudged our users by announcing that all Jenkins controllers would become read-only (no builds) unless an exception was explicitly requested. We had much better ownership information for controllers and they generally aligned with our organization's structure, so it made sense to focus on controllers rather than jobs. The list of controllers was also a much more manageable list than the list of jobs.\n\nTo obtain an exception, we asked our users to find their controllers in a spreadsheet and put their contact information next to each one. This enabled us to get a guaranteed up-to-date list of stakeholders we could follow up with as we sprinted to the finish line, but also enabled users to clearly let us know which jobs they absolutely needed. At peak, we had about 400 controllers; by January we had 220, but only 54 controllers required exceptions (several of them owned by us, to run our tests and canaries).\n\n![Indeed - Jenkins Controller Count graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099357/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099357392.png)\n\nWe had a manageable list of around 50 teams we divided among our team and started doing outreach to understand how each team was progressing with the migration. We spent January and February discovering that some teams planned to finish their migration without our help before February 28 others were planning to deprecate their projects before then, and a very small number were very worried they wouldn't make it.\n\nWe were able to work with this smaller set of teams and provide them with “white-glove” service. We still explained that while we lacked the expertise necessary to do the migration for them, we could partner with a subject matter expert from their team. 
For some projects, we wrote and they reviewed; for others, they wrote and we reviewed. In the end, all of our work paid off and we turned off Jenkins on the very day we had announced 8 months earlier.\n\n## The results: Enhanced CI efficiency and user satisfaction\n\nAt its peak, our Jenkins CI platform ran over 14,000 pipelines per day and serviced our thousands of projects. Today, our GitLab CI platform has run over 40,000 pipelines in a single day and regularly runs over 25,000 per day. The incremental cost of each job of each pipeline is similar to Jenkins, but without the overhead of hardware to run the controllers. Additionally, these controllers served as single points of failure and scaling limiters that forced us to artificially divide our platform into segments. While an apples-to-apples comparison is difficult, we find that with this overhead gone our CI hardware costs are 10-20% lower. Additionally, the support burden of GitLab CI is lower since the application automatically scales in the cloud, has cross-availability-zone resiliency, and the templating language has excellent public documentation available.\n\nA benefit just as important, if not moreso, is that now we are at over 70% adoption of our golden paths. This means that we can roll out an improvement and over 5,000 projects at Indeed will benefit immediately with no action required on their part. This has enabled us to move some jobs to more cost-effective ARM64 instances, keep users' build images updated more easily, and better manage other cost saving opportunities. Most importantly, our users are happier with the new platform.\n\n__About the author:__\n*Carl Myers lives in Sacramento, CA, and is the manager of the CI Platform team at Indeed. 
Carl has spent his nearly two-decade career dedicated to building internal tools and developer platforms that delight and empower engineers at companies large and small.*\n\n**Acknowledgements:**\n*This migration would not have been possible without the tireless efforts of Tron Nedelea, Eddie Huang, Vivek Nynaru, Carlos Gonzalez, Lane Van Elderen, and the rest of the CI Platform team. The team also especially appreciates the leadership of Deepak Bitragunta, and Irina Tyree for helping secure buy-in, resources and company wide alignment throughout this long project. Finally, our thanks go out to everyone across Indeed who contributed code, feedback, bug reports, and helped migrate projects.*\n\n**This is an edited version of the article [How Indeed Replaced Its CI Platform with Gitlab CI](https://engineering.indeedblog.com/blog/2024/08/indeed-gitlab-ci-migration/), originally published on the Indeed engineering blog.**",[720,9,763,495],{"slug":3171,"featured":91,"template":700},"how-indeed-transformed-its-ci-platform-with-gitlab","content:en-us:blog:how-indeed-transformed-its-ci-platform-with-gitlab.yml","How Indeed Transformed Its Ci Platform With Gitlab","en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab.yml","en-us/blog/how-indeed-transformed-its-ci-platform-with-gitlab",{"_path":3177,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3178,"content":3183,"config":3188,"_id":3190,"_type":14,"title":3191,"_source":16,"_file":3192,"_stem":3193,"_extension":19},"/en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"title":3179,"description":3180,"ogTitle":3179,"ogDescription":3180,"noIndex":6,"ogImage":1032,"ogUrl":3181,"ogSiteName":685,"ogType":686,"canonicalUrls":3181,"schema":3182},"How to automate software delivery using Quarkus and GitLab","Here's a step-by-step guide to automated software delivery using Supersonic Subatomic Java (Quarkus) and 
GitLab.","https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate software delivery using Quarkus and GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2022-06-09\",\n      }",{"title":3179,"description":3180,"authors":3184,"heroImage":1032,"date":3185,"body":3186,"category":741,"tags":3187},[1506],"2022-06-09","\n\nIn this day and age, organizations need to deliver innovative solutions faster than ever to their customers to stay competitive. This is why solutions that speed up software development and delivery, such as Quarkus and GitLab, are being adopted by teams across the world.\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java, is an open source Kubernetes-native Java stack tailored for OpenJDK HotSpot and GraalVM, crafted from respected Java libraries and standards. Quarkus has been steadily growing in popularity and use because of the benefits that it delivers: cost savings, faster time to market/value, and reliability. Quarkus offers two modes: Java and native. Its Java mode builds your application using the JDK and its native mode compiles your Java code into a native executable.\n\nGitLab, the One DevOps Platform, includes capabilities for all DevOps stages, from planning to production, all with a single model and user interface to help you ship secure code faster to any cloud and drive business results. 
Besides DevOps support, GitLab also offers GitOps support.\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we show how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. Below we list the steps how to accomplish this.\n\n## Prerequisite\n\nThe prerequisite for the subsequent instructions is to have a K8s cluster up and running and associated to a group in your GitLab account. For an example on how to do this, please watch this [video](https://youtu.be/QRR3WuwnxXE).\n\n## Generate your Quarkus project using the generator and upload to GitLab\n\n- From a browser window, point to the Quarkus generator site, https://code.quarkus.io, and click on the button **Generate your application**.\n\n![Generate Quarkus app](https://about.gitlab.com/images/blogimages/quarkusone.png){:small.center.}\n\nGenerate a sample Quarkus application using the generator\n{: .note.text-center}\n\n- On the popup window, click on the button **DOWNLOAD THE ZIP**, to download a sample Quarkus application in a ZIP file to your local machine. The downloaded file is named `code-with-quarkus.zip`.\n\n- Unzip the file on your local machine in a directory of your choice. 
This will create a new directory called `code-with-quarkus` with all the files for the sample Quarkus application.\n\n- From a browser window, open https://gitlab.com, and log in using your GitLab credentials.\n\n- Head over to the GitLab group to which you associated your K8s cluster and create a blank project named `code-with-quarkus`.\n\n![Create project code-with-quarkus](https://about.gitlab.com/images/blogimages/quarkustwo.png){: .shadow.small.center.wrap-text}\nCreate project code-with-quarkus\n{: .note.text-center}\n\n- From a Terminal window on your local machine, change directory to the newly unzipped directory `code-with-quarkus` and execute the command `rm .dockerignore` to delete the `.dockerignore` file that came with the sample Quarkus application. After removing this file, execute the following commands to populate your newly create Git project `code-with-quarkus` with the contents of this directory:\n\n**NOTE:** Depending on your version of git installed on your local machine, the commands below may vary. 
Keep in mind that the goal of the steps below is to upload the project on your local machine to your newly created GitLab project.\n\n```\ngit init\ngit remote add origin https://gitlab.com/[REPLACE WITH PATH TO YOUR GROUP]/code-with-quarkus.git\ngit add .\ngit commit -m \"Initial commit\"\ngit push --set-upstream origin master\n```\n\nAt this point, you should have your sample Quarkus application in your GitLab project `code-with-quarkus`.\n\n## Modify the generated Dockerfile.jvm file and indicate its location\n\nSince the location of the Dockerfile is not at the root level of the project, we need to create a project variable DOCKERFILE_PATH and set it to `src/main/docker/Dockerfile.jvm` to indicate to the Auto Build job where to find the Dockerfile to build the container image.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Variables** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- Click on the **Add Variable** button and enter the following values for the fields in the popup:\n\n```\nKey = DOCKERFILE_PATH\nValue = src/main/docker/Dockerfile.jvm\nType = Variable\nEnvironment scope = All (default)\nProtect variable Flag = ensure this flag is unchecked\nMask variable Flag = ensure this flag is unchecked\n```\n\nThe variable definition should look as follows:\n\n![Add var dockerfilepath](https://about.gitlab.com/images/blogimages/quarkusthree.png){: .shadow.small.center.wrap-text}\nAdd DOCKERFILE_PATH variable to the your code-with-quarkus project\n{: .note.text-center}\n\n- Click on the **Add variable** button to complete adding this variable to your project\n\nIn order for Auto Build to work, we need to make some minor modifications to the generated Dockerfile.jvm in the sample Quarkus application.\n\n- From your `code-with-quarkus` GitLab project window, navigate to the directory `src/main/docker` and click on 
the file `Dockerfile.jvm`. Click on the **Edit** button to start making changes to this file.\n\n- At the top of the file, you will see about 77 lines of comments. Replace all the lines following the comments with the following code segment:\n\n```\n####\nFROM openjdk:11 as builder\nRUN mkdir /build\nADD . /build/\n\nWORKDIR /build\nRUN ./mvnw package\n\nFROM registry.access.redhat.com/ubi8/openjdk-11:1.11\n\nENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'\n\n# We make four distinct layers so if there are application changes the library layers can be re-used\nCOPY --from=builder --chown=185 /build/target/quarkus-app/lib/ /deployments/lib/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/*.jar /deployments/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/app/ /deployments/app/\nCOPY --from=builder --chown=185 /build/target/quarkus-app/quarkus/ /deployments/quarkus/\n\nEXPOSE 8080\nUSER 185\nENV AB_JOLOKIA_OFF=\"\"\nENV JAVA_OPTS=\"-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager\"\nENV JAVA_APP_JAR=\"/deployments/quarkus-run.jar\"\n```\n\nThe lines above add a build stage called`builder` to do the Java build using the openjdk:11 image and adds a `build` working directory to the process. The rest of the lines are effectively the same as the original except that we have updated the paths of the `COPY` commands to find the appropriate files under the `build` working directory.\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the application port number\n\nThe Auto Deploy job of Auto DevOps defaults to port 5000 for applications but the sample Quarkus application uses port 8080. So, we need to override this value in the helm chart for the Auto Deploy job. 
This is how you do it:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory as shown below:\n\n![Select new file](https://about.gitlab.com/images/blogimages/quarkusfour.png){: .shadow.small.center.wrap-text}\nSelect New file from your code-with-quarkus project top-level directory\n{: .note.text-center}\n\n- On the **New file** window, enter `.gitlab/auto-deploy-values.yaml` for the name of the new file and paste the following two lines as the content of the file:\n\n```\nservice:\n  internalPort: 8080\n```\n\nYour window should look as follows:\n\n![Update application port number for Auto Deploy](https://about.gitlab.com/images/blogimages/quarkusfive.png){: .shadow.small.center.wrap-text}\nUpdate the application port number in the helm chart for Auto Deploy\n{: .note.text-center}\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Update the version of the JDK\n\nThe sample Quarkus application includes a unit test that is automatically run by the Auto Test job, which uses a Java version not compatible with Quarkus resulting in “java.lang.UnsupportedClassVersionError” exceptions. To solve this, we need to adjust the Java runtime version to 11 since this is the lowest version of the JRE supported by Quarkus. Let’s do this:\n\n- From your `code-with-quarkus` GitLab project window, click on **New File** from the pop-down menu next to project root name directory and name the new file `system.properties`. 
As its contents, paste the following line into it:\n\n```\njava.runtime.version=11\n```\n\n- Click on the **Commit changes** button at the bottom of the **New file** window to create the new file.\n\n## Enable Auto DevOps\n\nLastly, we need to enable Auto DevOps for your `code-with-quarkus` GitLab project.\n\n- From your `code-with-quarkus` GitLab project window, select **Settings > CI/CD** from the left vertical navigation menu.\n\n- Scroll to the **Auto DevOps** section on the screen and click on the **Expand** button on the right hand side of the section.\n\n- In the section, check the **Default to Auto DevOps pipeline** checkbox. Then, for Deployment strategy, select on the radio button **Automatic deployment to staging, manual deployment to production**. Finally, click on the **Save changes** button. Here’s an example screenshot:\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/quarkussix.png){: .shadow.small.center.wrap-text}\nEnable Auto DevOps for your sample Quarkus project\n{: .note.text-center}\n\nThis will launch an Auto DevOps pipeline that will build, test and deploy your application first to the staging environment and then give you the option to manually deploy to 100% of the production environment. The completed Auto DevOps pipeline should look like this:\n\n![Completed pipeline](https://about.gitlab.com/images/blogimages/completed-pipe.png){: .shadow}\nCompleted Auto DevOps pipeline for a sample Quarkus application in Java mode\n{: .note.text-center}\n\n## Conclusion\n\nThe combination of Quarkus and GitLab can empower your developers and operations teams to collaborate better, spend more time innovating to deliver business value and differentiating capabilities to end users.\n\nIn this article, we showed how to automate the software delivery of a generated Quarkus application in Java mode using GitLab Auto DevOps. 
Here is [a working sample project](https://gitlab.com/tech-marketing/sandbox/hn/code-with-quarkus) of this Quarkus application, whose delivery has been automated by GitLab Auto DevOps.\n\n\n\n\n\n\n\n\n\n\n",[721,9,268],{"slug":3189,"featured":6,"template":700},"how-to-automate-software-delivery-using-quarkus-and-gitlab","content:en-us:blog:how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","How To Automate Software Delivery Using Quarkus And Gitlab","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab.yml","en-us/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab",{"_path":3195,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3196,"content":3202,"config":3208,"_id":3210,"_type":14,"title":3211,"_source":16,"_file":3212,"_stem":3213,"_extension":19},"/en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"title":3197,"description":3198,"ogTitle":3197,"ogDescription":3198,"noIndex":6,"ogImage":3199,"ogUrl":3200,"ogSiteName":685,"ogType":686,"canonicalUrls":3200,"schema":3201},"How to automate testing for a React application with GitLab","Learn how to add React automated tests to a GitLab CI pipeline with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/how-to-automate-testing-for-a-react-application-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate testing for a React application with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2022-11-01\",\n      }",{"title":3197,"description":3198,"authors":3203,"heroImage":3199,"date":3205,"body":3206,"category":718,"tags":3207},[3204],"Jeremy Wagner","2022-11-01","React is a popular JavaScript library for building user interfaces. 
In this\ntutorial, I'll show you \n\nhow to create a new React application, run unit tests as part of the CI\nprocess in GitLab, and output\n\nthe test results and code coverage into the pipeline.\n\n\n## Prerequisites\n\n\nFor this tutorial you will need the following:\n\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on\nyour system\n\n- [Git](https://git-scm.com/) installed on your system\n\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n\n\n## Getting started\n\n\nTo get started, [create a new project in\nGitLab](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project).\n\n\nWhen you are on the \"Create new project\" screen, select \"Create blank\nproject.\" Fill out the project information \n\nwith your project name and details. After you create the project, you will\nbe taken to the project with an empty repository.\n\n\nNext, we will clone the repository to your local machine. Copy the SSH or\nHTTPS URL from the \"Clone\" button and run the following\n\ncommand in the terminal for your working directory:\n\n\n```\n\ngit clone \u003Cyour copied URL here>\n\n```\n\n\n## Create the React app\n\n\nYou will create a new React application by using [Create React\nApp](https://reactjs.org/docs/create-a-new-react-app.html#create-react-app).\n\n\nFrom the terminal `cd` into your newly cloned project directory and run this\ncommand:\n\n\n```\n\nnpx create-react-app .\n\n```\n\n\nThe npx CLI tool will create a new React application inside of your current\ndirectory.\n\n\nTo run the application, run the following command in your terminal:\n\n\n```\n\nnpm run start\n\n```\n\n\nYou can view the application you created in your browser window at\n`https://localhost:3000`.\n\n\n![Create React App home\npage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/create-react-app.png){:\n.shadow}\n\n\nStop your application by pressing `CTRL` + 
`c` in your terminal. \n\n\nPush your new application to GitLab by running the following commands:\n\n\n```\n\ngit add -A\n\ngit commit -m \"Initial creation of React application\"\n\ngit push\n\n```\n\n\n## Testing your application\n\n\nBy default, Create React App uses [jest](https://jestjs.io/) as the test\nrunner and one unit test to run.\n\n\n```javascript\n\nimport { render, screen } from '@testing-library/react';\n\nimport App from './App';\n\n\ntest('renders learn react link', () => {\n  render(\u003CApp />);\n  const linkElement = screen.getByText(/learn react/i);\n  expect(linkElement).toBeInTheDocument();\n});\n\n```\n\n\nInside your `package.json`, you should see that it comes with several\nscripts.\n\n\n```javascript\n\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\",\n  }\n```\n\n\nUse the test script to run the tests in your application by running the\nfollowing command:\n\n\n```\n\nnpm run test\n\n```\n\n\nWhen prompted for \"Watch Usage,\" press `a` to run all of the tests. You will\nsee that the existing test passes and it continues to watch for changes.\n\n\n![CLI passing\ntests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/passing-test-cli.png){:\n.shadow}\n\n\nFor local development, watching for changes to run the tests is great;\nhowever, for our CI pipeline we would like to run the tests once, \n\ncreate a report so that we can see the results inside of our pipeline, and\nalso determine the code coverage.\n\n\nExit the jest test watcher by pressing `CTRL` + `c` in your terminal. \n\n\n## Add unit test reporting and coverage\n\n\nTo view the unit test report, GitLab requires the runner to upload a JUnit\nreport format XML file.\n\nWe will use `jest-junit` to generate this file. 
This is a unit test report\nfor jest and will create an XML\n\nfile in the right format.\n\n\nTo install `jest-junit`, run the following command in your terminal:\n\n\n```\n\nnpm install --save-dev jest-junit\n\n```\n\n\nNow, add a new script to run the unit tests inside of your CI pipeline.\n\nAdd a `test:ci` script to your `package.json` that looks like this:\n\n\n```javascript\n\n\"scripts\": {\n    \"start\": \"react-scripts start\",\n    \"build\": \"react-scripts build\",\n    \"test\": \"react-scripts test\",\n    \"eject\": \"react-scripts eject\",\n    \"test:ci\": \"npm run test -- --testResultsProcessor=\\\"jest-junit\\\" --watchAll=false --ci --coverage\"\n  },\n```\n\n\n`--testResultsProcessor=\\\"jest-junit\\\"` tells jest to use the `jest-junit`\nlibrary to create a unit test \n\nreport. `--watchAll=false` disables watch mode so that the tests will not\nrerun when something changes. `--ci` tells \n\nJest that it is running in a CI environment. `--coverage` tells Jest that\ntest coverage information should be collected \n\nand reported in the output. For more information on these options, visit the\n[jest CLI options](https://jestjs.io/docs/cli) documentation.\n\n\n\nIf you run the new `test:ci` script, it will run the tests and create an XML\nfile named `junit.xml` and print coverage statistics to the CLI.\n\n\n\n![CLI code\ncoverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/coverage-cli.png){:\n.shadow}\n\n\n## Add unit tests to your CI pipeline\n\n\nIn the root of your application, create a file named `.gitlab-ci.yml`. \n\n\nDefine a test stage for your pipeline by adding the following code to your\n`.gitlab-ci.yml` file:\n\n\n```\n\nstages:\n  - test\n```\n\n\nNext, add a job named `unit-test` that will be responsible for running the\nunit tests in the test stage. 
Add the following code below the\n\ndefined stages:\n\n\n```\n\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```\n\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n\n```\n\nstages:\n  - test\n\nunit-test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n```\n\n\n\nBefore we push these changes to GitLab, add the following line to your\n`.gitignore`:\n\n\n```\n\njunit.xml\n\n```\n\n\nAdd your changes to GitLab by running these commands in your terminal:\n\n\n```\n\ngit add -a\n\ngit commit -m \"Adds .gitlab-ci.yml with unit testing\"\n\ngit push\n\n```\n\n\nWhen this command finishes, your code will be pushed to your project in\nGitLab and a pipeline will start \n\nautomatically running the `unit-test` job we defined earlier.\n\n\n![CI pipeline\nrunning](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-starting.png){:\n.shadow}\n\n\nWhen the pipeline completes, click the pipeline ID (_#680073569 in this\ncase_).\n\n\nInside the pipeline, click the _Jobs_ tab and you should see the coverage\nfor the unit-test job is 8.33%.\n\n\n![CI pipeline\ncoverage](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-coverage.png){:\n.shadow}\n\n\nClick the _Tests_ tab and you can see the testing results for the unit-test\njob. 
\n\n\n![CI pipeline\ntests](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-tests.png){:\n.shadow}\n\n\nClick the name of the job _unit-test_ and you will see the status for each\nof the test suites run.\n\n\n![CI pipeline test\ndetails](https://about.gitlab.com/images/blogimages/2022-11-04-how-to-automate-testing-for-a-react-application-with-gitlab/ci-pipeline-test-details.png){:\n.shadow}\n\n\nCongratulations! You just added automated tests for your React application\nto your CI pipeline inside of GitLab and output the results to the pipeline.\n\n\nAll code for this tutorial can be found in this\n[project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/react-app).\n\n\nCover image by [Lautaro\nAndreani](https://unsplash.com/@lautaroandreani?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/react?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n\n\n## Related Posts\n\n- [The GitLab guide to modern software\ntesting](https://about.gitlab.com/blog/the-gitlab-guide-to-modern-software-testing/)\n\n- [Unit Test\nReports](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\n\n- [coverage keyword](https://docs.gitlab.com/ee/ci/yaml/#coverage)\n",[874,721,9],{"slug":3209,"featured":6,"template":700},"how-to-automate-testing-for-a-react-application-with-gitlab","content:en-us:blog:how-to-automate-testing-for-a-react-application-with-gitlab.yml","How To Automate Testing For A React Application With 
Gitlab","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab.yml","en-us/blog/how-to-automate-testing-for-a-react-application-with-gitlab",{"_path":3215,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3216,"content":3222,"config":3227,"_id":3229,"_type":14,"title":3230,"_source":16,"_file":3231,"_stem":3232,"_extension":19},"/en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"title":3217,"description":3218,"ogTitle":3217,"ogDescription":3218,"noIndex":6,"ogImage":3219,"ogUrl":3220,"ogSiteName":685,"ogType":686,"canonicalUrls":3220,"schema":3221},"How to automatically create a new MR on GitLab with GitLab CI","With this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, creates one.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679783/Blog/Hero%20Images/whats-next-for-gitlab-ci.jpg","https://about.gitlab.com/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automatically create a new MR on GitLab with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2017-09-05\",\n      }",{"title":3217,"description":3218,"authors":3223,"heroImage":3219,"date":3224,"body":3225,"category":718,"tags":3226},[759],"2017-09-05","At [fleetster](https://www.fleetster.net/), we have our own instance of\n[GitLab](https://gitlab.com/) and we rely a lot on [GitLab\nCI](/solutions/continuous-integration/). How could it be otherwise? We are a\nsmall team, with a lot of different projects (only in last month, we had\nmore than **13,000 commits** over **25 different projects**, and we are only\n10 people – with myself working part time). 
Automating as many development\nsteps as possible (from build to QA to deploy) is helping us a lot, but\nsometimes we write some code and then forget about it. This is a disaster!\nWe have some bug fix or some new feature ready, but it is forgotten in some\nbranch somewhere.\n\n\n\u003C!-- more -->\n\n\nThis is why we have a policy to push as soon as possible to open a new MR,\nmark it as WIP, and assign to ourselves; in this way GitLab will remind us\nwe have an MR.\n\n\nYou need to do three steps to achieve that:\n\n\n* Push the code\n\n* Click on the link that appears on your terminal\n\n* Fill a form\n\n\nBut we are nerds. We are lazy. So one night, after a couple of beers,\n[Alberto Urbano](https://www.linkedin.com/in/alberto-urbano-047a4b19/) and I\nspent some hours to automate a task that requires 10 seconds.\n\n\nActually, the experience was quite fun, it was the first time we used GitLab\nAPIs and we learned things we will apply to others scripts as well.\n\n\n![Image via Riccardo's\nblog](https://about.gitlab.com/images/blogimages/automating-tasks-expectation-versus-reality.png){:\n.shadow}\u003Cbr>\n\n*Image by Randall Munroe,\n[xkcd.com](https://imgs.xkcd.com/comics/automation.png)*\n\n\n### The script\n\n\nWith this script, every time we push a commit, GitLab CI checks if the\nbranch that commit belongs to already has an open MR and, if not, it creates\nit. 
It then assigns the MR to you, and puts **WIP** in the title to mark it\nas a work in progress.\n\n\nIn this way you cannot forget about that branch, and when you’ve finished\nwriting code on it, you just need to remove the WIP from the title and\nassign to the right person to review it.\n\n\nIn the end, this is the script we came out with (when you add to your\nproject, remember to make it executable):\n\n\n```\n\n#!/usr/bin/env bash\n\n# Extract the host where the server is running, and add the URL to the APIs\n\n[[ $HOST =~ ^https?://[^/]+ ]] && HOST=\"${BASH_REMATCH[0]}/api/v4/projects/\"\n\n\n# Look which is the default branch\n\nTARGET_BRANCH=`curl --silent \"${HOST}${CI_PROJECT_ID}\" --header\n\"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" | python3 -c \"import sys, json;\nprint(json.load(sys.stdin)['default_branch'])\"`;\n\n\n# The description of our new MR, we want to remove the branch after the MR\nhas\n\n# been closed\n\nBODY=\"{\n    \\\"id\\\": ${CI_PROJECT_ID},\n    \\\"source_branch\\\": \\\"${CI_COMMIT_REF_NAME}\\\",\n    \\\"target_branch\\\": \\\"${TARGET_BRANCH}\\\",\n    \\\"remove_source_branch\\\": true,\n    \\\"title\\\": \\\"WIP: ${CI_COMMIT_REF_NAME}\\\",\n    \\\"assignee_id\\\":\\\"${GITLAB_USER_ID}\\\"\n}\";\n\n\n# Require a list of all the merge request and take a look if there is\nalready\n\n# one with the same source branch\n\nLISTMR=`curl --silent \"${HOST}${CI_PROJECT_ID}/merge_requests?state=opened\"\n--header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\"`;\n\nCOUNTBRANCHES=`echo ${LISTMR} | grep -o\n\"\\\"source_branch\\\":\\\"${CI_COMMIT_REF_NAME}\\\"\" | wc -l`;\n\n\n# No MR found, let's create a new one\n\nif [ ${COUNTBRANCHES} -eq \"0\" ]; then\n    curl -X POST \"${HOST}${CI_PROJECT_ID}/merge_requests\" \\\n        --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" \\\n        --header \"Content-Type: application/json\" \\\n        --data \"${BODY}\";\n\n    echo \"Opened a new merge request: WIP: ${CI_COMMIT_REF_NAME} and assigned to you\";\n    
exit;\nfi\n\n\necho \"No new merge request opened\";\n\n```\n\n\n### GitLab CI\n\n\nThe variables used in the script are passed to it by our `.gitlab_ci.yml`\nfile:\n\n\n```\n\nstages:\n    - openMr\n    - otherStages\n\nopenMr:\n    before_script: []   # We do not need any setup work, let's remove the global one (if any)\n    stage: openMr\n    only:\n      - /^feature\\/*/   # We have a very strict naming convention\n    script:\n        - HOST=${CI_PROJECT_URL} CI_PROJECT_ID=${CI_PROJECT_ID} CI_COMMIT_REF_NAME=${CI_COMMIT_REF_NAME} GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ./utils/autoMergeRequest.sh # The name of the script\n```\n\n\nAll these environment variables are set by GitLab itself, but the\nPRIVATE-TOKEN. A master of the project has to create it in its own profile\nand add to the project settings.\n\n\nTo create the personal token you can go to `/profile/personal_access_tokens`\non your GitLab instance, and then you add to your pipeline following this\nguide.\n\n\n### Ways to improve\n\n\nThe script is far from perfect.\n\n\nFirst of all, it has two API calls, one to take the list of MR and one to\ntake the default branch, to use it as target. Of course you can hardcode the\nvalue (in the end it shouldn’t change often), but hardcoding is always bad.\n\n\nAlso, it uses python3 to extract the name of the target branch – this is\njust one of many possible solutions, just use what is available on your\nsystem. Apart from that, the script doesn’t have any external dependency.\n\n\nThe other thing is how you need to set up the secret token to call the APIs.\nLuckily, GitLab’s developers are working on a [new\nway](https://gitlab.com/gitlab-org/gitlab-ce/issues/12729) to manage secret\ntokens.\n\n\n### Conclusion\n\n\nThis was a very small and very simple example about how much powerful\nContinuous Integration can be. 
It takes some time to set up everything, but\nin the long run it will save your team a lot of headache.\n\n\nIn fleetster we use it not only for running tests, but also for having\nautomatic versioning of the software and automatic deploys to testing\nenvironments. We are working to automate other jobs as well (building apps\nand publish them on the Play Store and so on).\n\n\nSpeaking of which, **do you want to work in a young and dynamic office with\nme and a lot of other amazing people?** Take a look at the [open positions\nat fleetster](https://www.fleetster.net/fleetster-team.html)!\n\n\nKudos to the GitLab team (and other guys who help in their free time) for\ntheir awesome work!\n\n\nIf you have any question or feedback about this blog post, please drop me an\nemail at riccardo@rpadovani.com :-)\n\n\nBye for now,\n\nA. & R.\n\n\nP.S: if you have found this article helpful and you’d like we write others,\ndo you mind to help us reaching the Ballmer’s peak and buy us a\n[beer](https://rpadovani.com/donations)?\n\n\nThis post originally appeared on\n[*rpadovani.com*](https://rpadovani.com/open-mr-gitlab-ci).\n\n\n## About the Guest Author\n\n\nRiccardo is a university student and a part-time developer at\n[fleetster](http://www.fleetster.net/). 
When not busy with university or\nwork, he likes to contribute to open-source projects.\n",[9,763,917],{"slug":3228,"featured":6,"template":700},"how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","content:en-us:blog:how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","How To Automatically Create A New Mr On Gitlab With Gitlab Ci","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"_path":3234,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3235,"content":3241,"config":3248,"_id":3250,"_type":14,"title":3251,"_source":16,"_file":3252,"_stem":3253,"_extension":19},"/en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"title":3236,"description":3237,"ogTitle":3236,"ogDescription":3237,"noIndex":6,"ogImage":3238,"ogUrl":3239,"ogSiteName":685,"ogType":686,"canonicalUrls":3239,"schema":3240},"How to prevent broken master with merge trains & pipelines","Do you still run pipelines on source branches? Let's start running them on merge commits!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678366/Blog/Hero%20Images/merge-train.jpg","https://about.gitlab.com/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid broken master with Pipelines for Merged Results and Merge Trains\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Shinya Maeda\"}],\n        \"datePublished\": \"2019-09-11\",\n      }",{"title":3242,"description":3237,"authors":3243,"heroImage":3238,"date":3245,"body":3246,"category":718,"tags":3247},"How to avoid broken master with Pipelines for Merged Results and Merge Trains",[3244],"Shinya Maeda","2019-09-11","\nBroken master. 
This can happen when CI pipelines run on the master branch (or default branch), but don't\npass all tests. A red cross mark is shown in the project's top page, signalling unstable source\ncode and eroding the trust of users. Broken master could also be a blocker against\na continuous deployment/delivery stream line in which deployment jobs\nare executed after the test stage passed in master pipelines.\n\nAll maintainers want to avoid this critical state,\nbut how can we prevent it?\n\n## Let's look at how master is broken in the first place\n\nLet's say you're one of the maintainers of a project. It's a busy repository with hundreds of merges\nto master every day. A developer assigns a merge request (MR) to you. The MR passed all of the tests in the CI pipelines,\nhas been reviewed thoroughly by code reviewers, all open discussions have been resolved, and the MR has been\napproved by the relevant [code owners](https://docs.gitlab.com/ee/user/project/codeowners/).\n\nYou would press the \"Merge\" button without a second thought, but how are you confident that\na pipeline running on master branch after the merge will pass all tests again?\nIf your answer is \"It might break the master branch,\" then\nyou're right. This could happen, for example, if master has advanced by some\nnew commits, and one of them changed a lint rule. The MR in question\nstill contains an invalid coding style, but the latest pipeline on the MR passes,\nbecause the feature branch is based on an old version of master.\n\nEnter two new GitLab features: [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nand [Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html).\nLet me show you how they works and how to enable them.\n\n## How to continually run CI pipelines on the merge commit\n\nLet's break down what went wrong in the scenario above. 
Even though the pipeline on the\nmerge request passed all the tests, it ran on a source (feature) branch\nwhich could be based on an outdated version of master. In such a case,\nthe result of pipeline is considered as _untrusted_, because there may be a huge difference\nbetween an actual-and-future merge commit and the commit in question.\n\nAs a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), developers can continually rebase their MR\non the latest master, but this is annoying and inefficient, given the speed of\ngrowth of the master branch.\nIt causes a lot of friction between developers and maintainers, slowing down the development cycle.\n\nTo address this problem, we introduced [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nin [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/#pipelines-for-merged-results).\n\nSimply put, the main difference between pipelines for merged results and normal pipelines is that\n**pipelines run on merge commits, instead of source branches, before the actual merge happens**.\nThis merge commit is generated from the latest commits of target branch and\nsource branch and written in a temporary place (`refs/merge-requests/:iid/merge`).\nTherefore, we can run a pipeline on it without interfering with master.\n\nHere is a sample workflow with the above scenario:\n\n1. A developer pushes a new commit to a merge request.\n1. GitLab creates a merge commit from the HEAD of the source branch and HEAD of the target branch.\n   This merge commit is written in `refs/merge-requests/:iid/merge` and does not change commit history of master branch.\n1. GitLab creates a pipeline on the merge commit, but this pipeline fails because the latest master changed a lint rule.\n1. 
A maintainer sees a failed pipeline in the merge request.\n\nAs you can see, the maintainer was able to hold off merging the dangerous MR\nbecause the latest pipeline on the MR didn't pass. The feature actually saved\nmaster from a broken state.\n\nAs a bonus, this workflow freeds developers from continual\nrebasing of their merge requests.\nAll they need to do is develop features with [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html).\nGitLab automatically creates an expected merge commit and validates the merge request prior to\nan actual merge.\n\n### How to get started with Pipelines for Merged Results\n\nYou can [start using this feature](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results)\ntoday, with just two steps:\n\n1. Edit the `.gitlab-ci.yml` config file to enable [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/merge_request_pipelines/).\n1. Enable the \"Merge pipelines will try to validate the post-merge result prior to merging\" option at **Settings > General > Merge requests** in your project.\n\n**Note:** If the configurations in your `.gitlab-ci.yml` file are too complex, you might stumble at the first point.\nWe're currently working on [improving the usability of pipelines for merge requests / merge request pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085).\nPlease leave your feedback in the issue if that's the case.\n\n## How to avoid race condition of concurrent merges\n\nWith [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html),\nwe can confidently say that MRs are continually tested against the latest master branch.\nHowever, what if multiple MRs have been merged at the same time?\nFor example:\n\n- There are two merge requests: MR-1 and MR-2. 
The latest pipelines have already passed in both MRs.\n- John (maintainer) and Cathy (maintainer) merge MR-1 and MR-2 at the same time, respectively.\n\nLater on, it turns out that MR-2 contains a coding offence which has just been introduced by MR-1.\nMaintainers hit merge without knowing that, and\nneedless to say, this will result in broken master. How can we handle this race condition properly?\n\nIn [GitLab 12.1](/releases/2019/07/22/gitlab-12-1-released/#parallel-execution-strategy-for-merge-trains), we introduced a new feature,\n[Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/).\nBasically, a Merge Train is a queueing system that allows you to avoid this kind\nof race condition.\nAll you need to do is add merge requests to the merge train, and it\nhandles the rest of the work for you.\nIt creates merge commits according\nto the sequence of merge requests and runs pipelines on the expected merge commits.\nFor example, John and Cathy could have avoided broken master with the following workflow:\n\n1. John and Cathy add MR-1 and MR-2 to their [Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/), respectively.\n1. In MR-1, the Merge Train creates an expected merge commit from HEAD of the source branch and HEAD of the target branch.\n   It creates a pipeline on the merge commit.\n1. In MR-2, the Merge Train creates an expected merge commit from HEAD of the source branch and the expected merge commit of MR-1.\n   It creates a pipeline on the merge commit.\n1. The pipeline in MR-1 passes all tests and merged into master branch.\n1. The pipeline in MR-2 fails because it violates a lint check which was changed by MR-1. MR-2 is dropped from the Merge Train.\n1. 
Developer revisits MR-2, fixes the coding offence, and asks Cathy to add it to the Merge Train again.\n\nAs you can see, the Merge Train successfully rejected MR-2 before it could break the master\nbranch. With this workflow, maintainers can feel more confident when they\ndecide to merge something. Also, this doesn't slow down development lifecycle\nthat pipelines are built on optimistic assumption that, in the above case,\nthe pipeline in MR-1 and the pipeline in MR-2 **start almost simultaneously**.\nMR-2 builds a merge commit as if MR-1 has already been merged, so that maintainers\ndon't need to wait for long time until each pipeline finished. If one of the\npipelines failed, the problematic merge request is dropped from the merge train\nand the train will be reconstructed without it.\n\n### How to get started with Merge Trains\n\nYou can [start using Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train)\ntoday, if you've already enabled [Pipelines for merged results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results). 
Click [\"Start/Add merge train\" button](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train) in merge requests.\n\n## A quick demonstration of Merge Trains\n\nHere is a demonstration video that explains the advantage of Merge Train feature.\nIn this video, we'll simulate the common problem in a workflow without\nMerge Trains, and later, we resolve the problem by enabling a Merge Train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## Wrap up\n\nRunning pipelines on expected merge commits allows us to predict what will happen\nin the future and avoid broken master proactively. It soothes the headache of\nrelease managers and gives maintainers and developers more confidence that their code\nis reliable enough to be merged and shipped. In addition, Merge Trains allow you\nto merge things safely without slowing down the development cycle.\n\nGive this advanced CI/CD feature a try today!\n\nFor more information, check out [the documentation on merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) and [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nCover image by [Dan Roizer](https://unsplash.com/@danny159) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,896,875,695],{"slug":3249,"featured":6,"template":700},"how-to-avoid-broken-master-with-pipelines-for-merge-requests","content:en-us:blog:how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","How To Avoid Broken Master With Pipelines For Merge 
Requests","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"_path":3255,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3256,"content":3262,"config":3267,"_id":3269,"_type":14,"title":3270,"_source":16,"_file":3271,"_stem":3272,"_extension":19},"/en-us/blog/how-to-build-reusable-ci-templates",{"title":3257,"description":3258,"ogTitle":3257,"ogDescription":3258,"noIndex":6,"ogImage":3259,"ogUrl":3260,"ogSiteName":685,"ogType":686,"canonicalUrls":3260,"schema":3261},"How to build more reusable CI/CD templates","Users can now define inputs to any includable CI/CD templates. Learn how and see what other CI/CD pipeline developments are coming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682709/Blog/Hero%20Images/pexels-mathias-reding-4386148.jpg","https://about.gitlab.com/blog/how-to-build-reusable-ci-templates","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build more reusable CI/CD templates\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-05-01\",\n      }",{"title":3257,"description":3258,"authors":3263,"heroImage":3259,"date":3264,"body":3265,"category":718,"tags":3266},[1567],"2023-05-01","\n\nThere are exciting new developments to share about our CI/CD templates features, known for their ability to get users up and running quickly with [GitLab CI/CD](/topics/ci-cd/). Our goals for the immediate future are to evolve templates into CI/CD components (more details below) and, soon, to release a CI/CD components catalog to make the reusing and sharing of pipeline configurations easier and more efficient for developers, both inside of their organizations and with the wider developer community. 
The first step in our journey is to enable users to define inputs to any includable file, ultimately creating more powerful and reusable CI/CD templates.\n\nHere is a short walkthrough on this capability: \n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"1870\" height=\"937\" src=\"https://www.youtube.com/embed/4ZRdgBy1n5E\" title=\"\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n\n## Build more robust and isolated CI/CD templates\nIn GitLab 15.11, we released, as Beta, the ability to define inputs to any includable file (any CI/CD file that you include in your pipeline). Until now, we've been leveraging environment variables to pass information. As an example, we used environment variables to pass information from an upstream pipeline to a downstream pipeline.\n\nUsing environment variables for passing information is like declaring global variables in programming languages – it has an effect on your entire pipeline, which means that the more variables we declare, the more we risk variable conflicts and increased variable scope.\n\nInput parameters are similar to variables passed to the template but exist only inside a specific scope and don't affect other templates in your pipelines. There are several benefits of using inputs, including:\n1. Inputs are not inherited from upstream includes and must be passed explicitly, which means they will never affect your entire pipeline. \n2. Inputs have full support for CI/CD interpolation, which means you have complete flexibility to \"templatize\" your pipeline and use `$[[ inputs.* ]]` across all keywords in your CI/CD configuration. \n3. You can define mandatory and optional inputs to be used as part of your CI/CD templates.\n4. You can define a default value for inputs. 
\n \nThis paradigm allows users to build more robust and isolated templates (which will soon evolve into components) and enables users to declare and enforce contracts. \n\n### Add your inputs and let us know what you think! \nThe ability to define inputs to a CI/CD configuration file is available right now and we'd love for users to dive in and begin adding inputs to templates. You can check out [the GitLab docs](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs) and review [this example project](https://gitlab.com/grzesiek/ci-interpolation-example) to better understand how to use inputs as part of your daily workflow. If you use this feature and have feedback, please share it with us in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n## What's next in CI/CD pipelines?\nIn GitLab 16.0, we are planning to release an experimental version of a CI/CD pipeline component, which will be the first building block of our CI/CD catalog. A pipeline component is a reusable, single-purpose building block that abstracts a single pipeline configuration unit away. To learn more, please check out this [example project](https://gitlab.com/gitlab-test-ci-catalog/catalog/ruby). \n\n### Why are we moving to components?\nComponents are preconfigured CI/CD files that automate the process of building, testing, and deploying software applications. CI/CD components provide:\n* **Versioning**: Each component is tagged with a version number, so you can reference a specific version or always use the `~latest` version.\n* **Consistency**: CI/CD components ensure consistency in your CI/CD pipelines across different projects, teams, and environments. By using a standardized approach, developers can reduce errors and improve the quality of their code.\n* **Time-savings**: CI/CD components save time by automating repetitive tasks such as running tests, building artifacts, and deploying applications. 
This enables developers to focus on more important tasks, like writing code and fixing bugs.\n* **Reusability**: CI/CD components can be reused across multiple projects and teams, eliminating the need to create custom scripts for each project. This saves time and reduces the risk of errors.\n* **Scalability**: CI/CD components are scalable and can be used to manage pipeline processes of large and complex applications. This enables developers to easily manage their projects as they grow.\n* **Flexibility**: CI/CD components are highly customizable and can be adapted to suit the needs of different projects, teams, and environments. This allows developers to use the tools and processes that work best for them.\n\nTL;DR: Using CI/CD components can help streamline the development process, save time, reduce errors, and improve the quality of code.\n\n### On the horizon: A CI/CD component catalog\nTo further streamline your development processes, improve the quality of your software delivery, and make it easier for developers to discover and use preconfigured components, we’ll be releasing the CI/CD component catalog, which will make using, creating, and sharing CI/CD components much more efficient and user-friendly, and we’re targeting release of this later this year. In the next months, we’ll be sharing more feature updates, blogs, docs, and demos to keep you posted on our journey toward CI/CD components and a CI/CD component catalog. We’re excited for you to test out the new capabilities as they drop, and we look forward to your feedback.  
\n\nCover image by [Mathias Reding](https://www.pexels.com/@matreding/) on [Pexels](https://www.pexels.com/photo/background-of-abstract-modern-architectural-pattern-4386148/).\n{: .note}\n",[9,721],{"slug":3268,"featured":6,"template":700},"how-to-build-reusable-ci-templates","content:en-us:blog:how-to-build-reusable-ci-templates.yml","How To Build Reusable Ci Templates","en-us/blog/how-to-build-reusable-ci-templates.yml","en-us/blog/how-to-build-reusable-ci-templates",{"_path":3274,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3275,"content":3281,"config":3288,"_id":3290,"_type":14,"title":3291,"_source":16,"_file":3292,"_stem":3293,"_extension":19},"/en-us/blog/how-to-choose-the-right-security-scanning-approach",{"title":3276,"description":3277,"ogTitle":3276,"ogDescription":3277,"noIndex":6,"ogImage":3278,"ogUrl":3279,"ogSiteName":685,"ogType":686,"canonicalUrls":3279,"schema":3280},"How to choose the right security scanning approach","GitLab offers multiple scanning methods for CI/CD pipelines, including compliance frameworks and scan and pipeline execution policies. 
Learn the basics, configurations, and advantages/disadvantages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097969/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_282096522_securitycompliance.jpeg_1750097968823.jpg","https://about.gitlab.com/blog/how-to-choose-the-right-security-scanning-approach","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to choose the right security scanning approach\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Genelin\"},{\"@type\":\"Person\",\"name\":\"Mathias Ewald\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":3276,"description":3277,"authors":3282,"heroImage":3278,"date":3285,"body":3286,"category":697,"tags":3287},[3283,3284],"Matt Genelin","Mathias Ewald","2024-08-26","Integrating security scans into your CI/CD pipeline is crucial for\nmaintaining robust and secure applications. But who's responsible for those\nscans? Who is responsible for adding them into every CI/CD pipeline for all\nprojects? And who decides which identified vulnerability may pass or needs\nfixing? For organizations in regulated industries, these are critical\nquestions.\n\n\nIn this article, you'll learn how GitLab\n[CI/CD](https://about.gitlab.com/topics/ci-cd/) enables each person in the\nsoftware development lifecycle to incorporate security scanning. You'll also\ndiscover the advantages and disadvantages of the various options available\nto add scanning to GitLab project pipelines. 
Code examples will help you\nkickstart security scanning on the GitLab DevSecOps platform.\n\n\nArticle contents:\n\n- [The basics of setting up security\nscanning](#the-basics-of-setting-up-security-scanning)\n\n- [Pipeline includes](#pipeline-includes)\n\n- [Compliance frameworks](#compliance-frameworks)\n\n- [Policies](#policies)\n\n- [Get started with security scanning](#get-started-with-security-scanning)\n\n\n## The basics of setting up security scanning\n\n\nGitLab uses [fictional\npersonas](https://handbook.gitlab.com/handbook/product/personas/#user-personas)\nto describe the individual team member who would typically use a given\nsecurity feature or approach. By exploring the perspective of a **Software\nDeveloper (Sasha)**, **Application Security Engineer (Amy)**, or **Platform\nEngineer (Priyanka)**, you can better understand the needs of each role on\nyour team.\n\n\nGitLab follows a \"pipeline-per-project\" principle, stored in the file named\n`.gitlab-ci.yml`. This file contains the project's CI/CD pipeline definition\nand is revision controlled like any other file in the project. You'll learn\nabout these project pipelines, as well as compliance pipelines and policy\npipelines. While compliance pipelines and policy pipelines also refer to the\nYAML files in GitLab projects, they typically have a different file name and\nserve a different purpose.\n\n\nReaders already familiar with security scanning in GitLab will find clarity\nin the security pipeline choices available in the context of your\nteam/organization. Therefore, we will discuss each of the approaches with\nrespect to the following criteria:\n\n\n- **Ease of use:** How easy is it to add security scanning to project\npipelines? Is it a reasonable task for Sasha, or something that Amy and\nPriyanka should handle?\n\n\n- **Customization:** How deeply can scanner configurations be customized\nusing that approach? 
While default configurations that make sense and cover\na wide range of customer needs are worth gold, the time often comes when\nscanner configurations need adjustments.\n\n\n- **Enforcement:** Is this approach suitable to companies operating in\nregulated industries or that otherwise have global policies in place? Can we\nensure each relevant project runs Scanner X with Configuration Y?\n\n\n## Pipeline includes\n\n\n[GitLab project pipeline\nincludes](https://docs.gitlab.com/ee/ci/yaml/includes.html) are a mechanism\nthat allows the integration of external pipelines into the `.gitlab-ci.yaml`\nproject pipeline. This is similar to including a library in many programming\nlanguages. This powerful feature enables the seamless incorporation of your\nown templates, as well as GitLab-provided templates, to be used as building\nblocks for your pipelines. Includes can be used in project pipelines or\nother pipeline files. An example of a commonly included external pipeline is\nincluding a security scanning pipeline into a GitLab project pipeline.\n\n\nHere are the common types of includes, which use the security scanner\nexample.\n\n\n### Templates\n\n\nGitLab offers ready-to-use\n[templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Jobs)\nthat can be included in a project pipeline to make it easier for teams to\nadd in various pre-built elements. 
The following is example code:\n\n\n```yaml\n\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n```\n\n\nThis code includes GitLab's templates for [Secret\nDetection](https://docs.gitlab.com/ee/user/application_security/secret_detection/),\n[Static Application Security\nTesting](https://docs.gitlab.com/ee/user/application_security/sast/),\n[Dependency\nScanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/),\nand [Container\nScanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n– all in only five lines of code. \n\n\nTo modify the behavior of jobs included via templates, you can either use\nvariables or use [GitLab's property merging\ncapabilities](https://docs.gitlab.com/ee/ci/yaml/includes.html#merge-method-for-include).\n\n\nYou will find an example of modifying the GitLab Container Scanning pipeline\nusing variables below. The [template for Container\nScanning](https://gitlab.com/gitlab-org/gitlab/-/blob/59f08760feaab1eb0489f694d4f28408af9c2e8d/lib/gitlab/ci/templates/Jobs/Container-Scanning.gitlab-ci.yml)\nneeds to know the location of the image and uses a variable named `CS_IMAGE`\nfor that as is documented in the template code linked above.\n\n\n```yaml\n\nvariables:\n  CS_IMAGE: \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA\"\n\ninclude:\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n```\n\n\nThe project pipeline variables are available to included job templates by\ndefining the `CS_IMAGE` variable before the included pipeline template. The\nContainer Scanning template inherits the `CS_IMAGE` variable value. 
\n\n\nIf we wanted to make changes to the [`allow_failure` property defined\nhere](https://gitlab.com/gitlab-org/gitlab/-/blob/59f08760feaab1eb0489f694d4f28408af9c2e8d/lib/gitlab/ci/templates/Jobs/Container-Scanning.gitlab-ci.yml#L38),\nwe would need to resort to property merging since the job templates employ\nno variable for the value. (The `allow_failure` property is a property\ngenerally available on every GitLab pipeline job. Please check the\n[documentation](https://docs.gitlab.com/ee/ci/yaml/#allow_failure) for\ndetails.)\n\n\nIn this example, `allow_failure` is set to `false`, meaning the entire\npipeline stops on a container scanning failure. This stops any unscanned\ncontainers from moving forward in the pipeline.\n\n\n```yaml\n\ninclude:\n  # Includes a job called \"container_scanning\"\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n\n# Define a job with same name for merging\n\ncontainer_scanning:\n  allow_failure: false\n```\n\n\nGitLab will load the job template and – as defined in the template code –\nregister a job called `container_scanning`. As the pipeline definition\ndeclares another job with that name, GitLab will merge that specification\nwith the already registered job.\n\n\nWhile this feature offers many possibilities, it also makes it impossible to\nprotect certain properties from being overwritten. We are only at the point\nof modifying the project pipeline, so there's no control over that anyway.\nBut later on, you will see that this can pose a challenge when security\nneeds to be enforced on a project.\n\n\n### Components\n\n\nTemplates are a great start for sharing repeatable GitLab pipelines. To\nfurther abstract reusable code across an entire organization or a GitLab\ninstance, [GitLab introduced\ncomponents](https://docs.gitlab.com/ee/ci/components/). Components are the\nnext logical step in GitLab's evolution of pipelines. 
Components are\ndesigned to simplify the creation and use of functional building blocks to\nuse in pipelines, or even to package and ship entire pipelines if needed.\nThey offer a well-defined interface, which accepts \"inputs\" for\nconfiguration. Otherwise, the component is completely isolated, which makes\nthem a great candidate to share work within an organization and to be\nsearchable and reusable building blocks.\n\n\nDevelopers can use the [CI/CD Catalog](https://gitlab.com/explore/catalog)\nto browse and search the collection of publicly available GitLab components,\nwhich are components officially built and maintained by GitLab. GitLab uses\nthe CI/CD Catalog [to publish our shipped\ncomponents](https://gitlab.com/components) such as security scanners\nalongside community-provided components.\n\n\nComponents are consumed similarly to templates via the `include` keyword. In\nan example above, we showed how the container scanning job requires\nknowledge of the image location. This \"input\" uses the component for\n[container\nscanning](https://gitlab.com/components/container-scanning/-/blob/19fd5b83bc631cb9890b4fadb08d31b3150853ce/templates/container-scanning.yml)\nis called `cs_image`. The configuration equivalent to the previous example\nlooks like this:\n\n\n```yaml\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n    inputs:\n      cs_image: \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA\"\n```\n\nIn this example, the SAST component is pinned at Version 2.0.2, the\nDependency Scanning component at Version 0.2.0, the Secret Detection\ncomponent at Version 1.1.2, and the Container Scanning component at Version\n4.1.0. 
`~latest` [and more tags are\navailable](https://docs.gitlab.com/ee/ci/components/#component-versions) for\nbleeding-edge component usage and other development needs.\n\n\nWhether you use templates or components, your pipeline might look like the\nimage below. The top four jobs in the test stage are the result of the four\ninclude statements in the code above.\n\n\n![An example\npipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097983863.png)\n\n\n### Advantages and disadvantages of using pipeline includes\n\n\n#### Ease of use\n\n\nOne of the benefits of using pipeline includes in GitLab is their ease of\nuse. We have seen how, with essentially six lines of code, we included four\ncommonly used security scanners. All the complex logic and setup are handled\nwithin the templates or components, saving Sasha time and effort by\nproviding a ready-to-use solution.\n\n\n#### Customization\n\n\nWhile templates offer the highest flexibility (variables and merging), it's\nimportant to remember that with \"great power comes great responsibility.\"\nThe flexibility of templates supports extensive customization, but requires\ncareful management and oversight to avoid unexpected results.\n\n\nIn contrast, components provide a more structured mechanism for authoring,\nsharing, and maintaining building blocks for a broader audience. Components,\nwhile not as customizable, enhance stability and reliability, and are a\nvaluable, reusable, and repeatable feature. \n\n\n#### Enforcement\n\n\nAs the name _include_ suggests, it is the GitLab project pipeline that needs\nto include templates or components. While scanner templates are\nstraightforward to use, Amy and Priyanka cannot be sure Sasha has included\nthem properly, or even at all. 
Enforcement of scanner usage is needed.\n\n\nFor regulated industries, managing security in project pipelines is not an\napproach that provides the necessary audit trail or enforcement.\n\n\n## Compliance frameworks\n\n\nGitLab identified the gap between the ability to enforce security scans on\nproject pipelines and the need to [adhere to regulatory compliance\nframeworks](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/)\nsuch as PCI DSS, NIST, and many more. The introduction of compliance\nframeworks as functionality caters to precisely this challenge.\n\n\nAt first glance, a compliance framework in GitLab is merely a label attached\nto a project, which would typically be named after the regulatory framework\nit is supposed to implement. The magic is added with the link between that\nlabel and a compliance pipeline YAML file, which is responsible for\nimplementing the necessary steps to ensure compliance. \n\n\nThe mechanism is straightforward: Every time the project pipeline is\ntriggered, GitLab executes the compliance pipeline instead. The compliance\npipeline runs with both the [CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/) and [predefined CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nof the project pipeline.\n\n\nThis allows for two main design patterns: a \"wrapping pipeline,\" where the\ncompliance pipeline includes the project pipeline, and an \"overriding\npipeline,\" where it does not. \n\n\n**Note:** Compliance pipelines have been deprecated in GitLab Version 17.3\nand are scheduled for removal in Version 19.0. At this point, we cannot\nrecommend implementing this approach for new development platforms. However,\nyou might already be using them, making it worth reading this section.\n\n\n### Wrapping pipelines\n\n\nIn the wrapping approach, the compliance pipeline defines its own jobs\naccording to specific compliance needs. 
It includes the project pipeline in\nthe same way we have seen templates included in the previous section. This\nsetup is possible because the predefined CI/CD variables originate from the\nproject pipeline, allowing the system to identify the pipeline definition's\nlocation for inclusion.\n\n\nHere is an example of what a simple compliance pipeline might look like. \n\n\n```yaml\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\n\nThe last three lines include the project pipeline based on available\nvariables.\n\n\n### Overriding pipelines\n\n\nUnlike wrapping pipelines, which include the project pipeline, overriding\npipelines ignore it entirely and run only their own jobs. 
This type of\npipeline defines each step, encompassing all necessary jobs to build, test,\nand deploy the application.\n\n\nBelow we see a mock compliance pipeline that illustrates this approach.\n\n\n```yaml\n\nstages: [\"build\", \"test\", \"deploy\"]\n\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n\nbuild-job:\n  stage: build\n  script: echo \"Building the container image\"\n\ntest-job:\n  stage: test\n  script: echo \"Running unit tests\"\n\ndeploy-job:\n  stage: deploy\n  script: echo \"Deploying app\"\n```\n\n\n### Advantages and disadvantages of compliance frameworks\n\n\n#### Ease of use\n\n\nWhile compliance frameworks aren't terribly complicated, they aren't as\nstraightforward and simple as pipeline includes. They're meant to be written\nand assigned to projects by Amy and Priyanka, who now need to interact with\npipeline YAML code. A framework needs to be declared in the top-level\nnamespace and compliance pipelines need to be created and maintained, and\ncompliance frameworks need to be attached to the right projects. \n\n\n#### Customization\n\n\nAmy and Priyanka are the authors of compliance pipelines. Like Sasha in the\nprevious section on includes, they have full control over what they include\nand how they include it, giving them maximum customizability of compliance\njobs such as security scanners.\n\n\n#### Enforcement\n\nThis aspect of enforcing pipelines questions whether developers can tamper\nwith security jobs. In an environment with a strong separation of duties,\nthis nuance requires some extra attention. To answer this, we need to look\nat each pattern separately:\n\n\n##### Wrapping pipelines\n\nAs seen before, project pipelines are included in compliance pipelines. 
In\naddition to group- or project-level CI/CD variables, every element of that\nproject pipeline must be considered a potential threat to the compliance\npipeline. Obviously, variables and jobs stick out as primary candidates.\nAnd, in fact, they can and will influence security job behavior if used\nmaliciously.\n\n\nHere is a simple example to illustrate the issue.\n\n\nCompliance pipeline:\n\n```yaml\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\n\nProject pipeline:\n\n```yaml\n\nvariables:\n  SECRET_DETECTION_DISABLED: true\n\nsemgrep-sast:\n  rules:\n    - when: never\n```\n\n\nThis project pipeline declares a variable `SECRET_DETECTION_DISABLED` (this\ncould be done via project- or group-level CI/CD variables, too), which is\nevaluated in the included secret detection template. Further, the last three\nlines use the merging mechanism discussed previously, to not execute the job\nat all. Kind of redundant, we know.\n\n\nBoth overrides could be prevented using components, but you get the idea.\nComponents, too, are receptive to such attacks via their inputs' default\nvalues, which often use variables, too! 
Let's take a look at how this could\nbe taken advantage of.\n\n\nCompliance pipeline:\n\n```yaml\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\n\nProject pipeline:\n\n```yaml\n\nvariables:\n  CI_TEMPLATE_REGISTRY_HOST: \"docker.io\"\n```\n\n\nTo understand what is happening here, look at the [SAST scanner component's\nLine\n6](https://gitlab.com/components/sast/-/blob/main/templates/sast.yml?ref_type=heads#L6):\n\n\n```yaml\n\nspec:\n  inputs:\n    stage:\n      default: test\n    image_prefix:\n      default: \"$CI_TEMPLATE_REGISTRY_HOST/security-products\"\n```\n\n\nThe `image_prefix` input uses the `CI_TEMPLATE_REGISTRY_HOST` to build the\ndefault value. By setting this variable to a false value in the same way we\nset `SECRET_DETECTION_DISABLED` to `true` before, Sasha may cause the job to\nload a wrong image and break SAST testing.\n\n\nTo prevent this override ability by the developer role, avoid templates in\nfavor of components. This approach covers many developer-induced loopholes.\nTo be certain of compliance, hardcode values for component inputs.\n\n\n##### Overriding pipelines\n\n\nThis type is an entirely different beast. Developers get no chance of\ninjecting actual pipeline code into the compliance pipeline. However,\ncompliance pipelines do run with the project's CI/CD variables. Hence, any\nvariable specified on the group- or project-level might modify the\ncompliance pipeline's behavior. 
With `SECRET_DETECTION_DISABLED` set to\n`true` in the project CI/CD variables, the following compliance pipeline can\nbe modified again:\n\n\n```yaml\n\nstages: [\"build\", \"test\", \"deploy\"]\n\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n\nbuild-job: ...\n\ntest-job: ...\n\ndeploy-job: ...\n\n```\n\n\nComponents can solve this particular problem, but, as before, component\ninputs may use CI/CD variables developers can set. Compliance pipeline\nauthors need to identify and take care of these situations. \n\n\n## Policies\n\n\nCompliance pipelines' shortcomings have led to the next step for managing\ncompliance:\n[policies](https://docs.gitlab.com/ee/user/application_security/policies/).\n\n\nGitLab introduced\n[policies](https://docs.gitlab.com/ee/user/application_security/policies/)\nas the way forward. Authors store a set of policies in a separate project as\nYAML files and apply them to projects on the group or project level. This\ngives Amy and Priyanka the flexibility to target individual projects with\nspecific requirements but also to ensure compliance across the entire\norganization if needed. Access to the policy project can be controlled\nwithin the policy project and audited within GitLab.\n\n\nPolicies come in different types for different purposes. The types we are\ninterested in right now are scan execution policies (SEP) and pipeline\nexecution policies (PEP).\n\n\n### Scan execution policies\n\n\nAs the name suggests, SEPs require a particular scan – or set of scans – to\nbe executed as part of the project pipeline and inject the respective scan\njobs into the pipelines of associated projects. 
They include the respective\n[template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Jobs)\nin the pipeline according to variables and rules set by Amy and Priyanka.\n\n\nGitLab supports policy authors with a comprehensive user interface in\naddition to a YAML-based Git workflow. The following screenshot and code\nsnippet illustrate a very basic example of a SEP:\n\n\n![Scan execution policy\nexample](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097983864.png)\n\n\n```yaml\n\nname: Secret Scanner\n\ndescription: ''\n\nenabled: true\n\nactions:\n\n- scan: secret_detection\n\nrules:\n\n- type: pipeline\n  branches:\n  - \"*\"\n```\n\n\nFor more details on SEP settings in the UI and YAML, please refer to the\n[policy\ndocumentation](https://docs.gitlab.com/ee/user/application_security/policies/scan_execution_policies.html).\n\n\n#### Advantages and disadvantages of scan execution policies\n\n\n##### Ease of use\n\nSEPs provide a lightweight, easy-to-use mechanism that enforces security on\nexisting and new CI/CD pipelines across the organization or on a granular\nlevel. The UI support makes them a viable tool for all relevant personas.\n\n\n##### Customization\n\nSEPs are restricted to predefined scanner jobs, and there is no option to\nextend this list with custom jobs at this point. This limitation can be\nrestrictive for teams with unique scanning requirements that fall outside\nthe standard options.\n\n\n##### Enforcement\n\n\nOnce an SEP is applied to a project (directly or indirectly), Sasha has no\nway to get rid of that scan job. 
Though, there may be ways to –\nintentionally or not – manipulate the scan job's behavior.\n\n\nJobs injected via SEPs generally are receptive to CI/CD variables and adhere\nto the general rules of [variable\nprecedence](https://docs.gitlab.com/ee/ci/variables/index.html#cicd-variable-precedence).\nFor this injection, Policies incorporate logic that denies changing some\npredefined variables as described\n[here](https://docs.gitlab.com/ee/user/application_security/policies/scan_execution_policies.html#cicd-variables)\nand generally deny the configuration of variables that follow certain\npatterns such as `_DISABLED` or  `_EXCLUDED_PATHS`.\n\n\nDespite these security measures, inconsiderate use of policies may still\nopen opportunities for tampering: In my test, I was able to set a\nproject-level CI/CD variable `SECURE_ANALYZERS_PREFIX` to a bad value (a\nnon-existing location) and as you can see\n[here](https://gitlab.com/gitlab-org/gitlab/-/blob/a2d4b8df0095c1363a105a1fa212daf227eca063/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml),\nthe secret detection template uses that to build the location of the scanner\nimage.\n\n\nWhile the scan job does get included in the pipeline run, it crashes very\nearly and, therefore, provides no scan results. 
Due to the [`allow_failure:\ntrue`\nconfiguration](https://gitlab.com/gitlab-org/gitlab/-/blob/a2d4b8df0095c1363a105a1fa212daf227eca063/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml#L18),\nthe pipeline will continue to run and eventually execute a deploy job.\n\n\nBecause SEP variables take the highest variable precedence, there is an easy\nfix to reduce the attack surface of the policy: Simply hardcode the correct\nvalue in your policy YAML or via the UI:\n\n\n```yaml\n\n- name: Secret Scanner\n  actions:\n  - scan: secret_detection\n    variables:\n      SECURE_ANALYZERS_PREFIX: registry.gitlab.com/security-products\n```\n\n\n### Pipeline execution policies\n\n\nSEPs enable the injection of a set of security-related jobs into any project\npipeline. In contrast, PEPs apply entire pipeline configurations to\nprojects, offering a lot more flexibility when it comes to customizing\nsecurity constraints. \n\n\nThere are two methods for implementing these policies, known as \"actions\":\n`inject` and `override`. These actions function similarly to the patterns we\nhave seen in the compliance frameworks section and provide flexible ways to\nenhance and enforce security standards within the development workflow.\n\n\n#### Injecting pipelines\n\n\nInjecting pipelines involves adding the jobs and other elements defined in\nthe policy pipeline into the project pipeline. Currently, jobs should only\nbe injected into reserved stages, namely `.pipeline-policy-pre` and\n`.pipeline-policy-post` to avoid unpredictable results.\n\n\nGitLab handles name clashes between jobs or variables in policy and project\npipelines effectively by building each pipeline in isolation before\ncombining them. 
This ensures that the integration process is seamless and\ndoes not disrupt existing workflows or configurations.\n\n\n![security scanning - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097983865.png)\n\n\nThe above screenshot shows an example of an injected policy pipeline.\nProject pipeline jobs are prefixed with `prj-` for easier identification.\n\n\n#### Overriding pipelines\n\n\nIn the override approach, the project pipeline is completely replaced by the\npolicy pipeline. This method is similar to compliance pipelines that do not\ninclude the project's `.gitlab-ci.yml` file. Despite the override, the\npipelines run using the project's CI/CD variables, maintaining consistency\nwith project-specific configurations. The compliance pipeline we used\nearlier makes a perfectly fine policy pipeline, too:\n\n\n```yaml\n\nstages: [\"build\", \"test\", \"deploy\"]\n\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n\nbuild-job:\n  stage: build\n  script: echo \"Building the container image\"\n\ntest-job:\n  stage: test\n  script: echo \"Running unit tests\"\n\ndeploy-job:\n  stage: deploy\n  script: echo \"Deploying app\"\n```\n\n\nThe image below shows a slightly more complete pipeline than the mock\npipeline above:\n\n\n![More complete\npipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097983866.png)\n\n\n**Note:** This doesn't currently work with SEPs.\n\n\nHowever, the existence of a Dockerfile may not always be a valid indicator,\nas developers might be building without Dockerfiles using Cloud Native\nBuildpacks, 
Heroku Buildpacks, Kaniko, or other tools. Managed pipelines do\nnot encounter this challenge, as they are more controlled and centralized.\n\n\n\u003C!-- TOC ignore:true -->\n\n### Projects with multiple container images\n\nFor projects that produce multiple container images, several container\nscanning jobs would be necessary for proper coverage. This raises similar\nquestions as before: \"How do we know there are multiple?\" and \"Is the source\nof that information trustworthy?\". If we wanted to rely on the existence of\n`Dockerfile`s a [dynamic\napproach](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#dynamic-child-pipelines)\nwould be necessary that includes a container scanning job for each\n`Dockerfile` detected.\n\n\n## Get started with security scanning\n\nIn this article, you've learned about a variety of approaches to adding\nsecurity scanning to CI/CD pipelines with a close look at ease of use,\ncustomizability, and the ability to strictly enforce scanning. You've seen\nthat a pipeline author who is held responsible for project compliance needs\nto keep a few things in mind during the process to avoid surprises down the\nline. We recommend building a small testing space on your GitLab instance\nand then run a few tests to reproduce the main points of this article. Put\nyourself in the shoes of a malicious Sacha (Sachas aren't generally\nmalicious people, but it's a good exercise) and think about how you could\nfool that annoying Amy and her security scans.\n\n\nGitLab provides strong support for all sorts of requirements and all\napproaches are – at least in our eyes – easy to implement due the platform's\nbaked-in functionality. You should find ways to bulletproof your scan jobs\nand, if not, you should open a ticket with our support. 
\n\n\nHappy pipelining!\n\n\n> #### Get started with security scanning today!\n\n> [Sign up for a free trial of GitLab\nUltimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial)\nto implement security scanning in your software development lifecycle.\n\n\n## Read more\n\n\n- [Meet regulatory standards with GitLab security and\ncompliance](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/)\n\n- [How to integrate custom security scanners into\nGitLab](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/)\n\n- [Integrate external security scanners into your DevSecOps\nworkflow](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/)\n",[697,917,9],{"slug":3289,"featured":91,"template":700},"how-to-choose-the-right-security-scanning-approach","content:en-us:blog:how-to-choose-the-right-security-scanning-approach.yml","How To Choose The Right Security Scanning Approach","en-us/blog/how-to-choose-the-right-security-scanning-approach.yml","en-us/blog/how-to-choose-the-right-security-scanning-approach",{"_path":3295,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3296,"content":3302,"config":3309,"_id":3311,"_type":14,"title":3312,"_source":16,"_file":3313,"_stem":3314,"_extension":19},"/en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io",{"title":3297,"description":3298,"ogTitle":3297,"ogDescription":3298,"noIndex":6,"ogImage":3299,"ogUrl":3300,"ogSiteName":685,"ogType":686,"canonicalUrls":3300,"schema":3301},"Review Apps for Android with GitLab, fastlane & Appetize.io","See how GitLab and Appetize.io can bring Review Apps to your Android project","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io","\n       
                 {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create Review Apps for Android with GitLab, fastlane, and Appetize.io\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Fontaine\"}],\n        \"datePublished\": \"2020-05-06\",\n      }",{"title":3303,"description":3298,"authors":3304,"heroImage":3299,"date":3306,"body":3307,"category":978,"tags":3308},"How to create Review Apps for Android with GitLab, fastlane, and Appetize.io",[3305],"Andrew Fontaine","2020-05-06","{::options parse_block_html=\"true\" /}\n\n\n\n\nIn a [previous look at GitLab and _fastlane_], we discussed how _fastlane_\nnow\n\nautomatically publishes the Gitter Android app to the Google Play Store, but\nat\n\nGitLab, we live on [review apps], and review apps for Android applications\ndidn't\n\nreally exist... until [Appetize.io] came to our attention.\n\n\nJust a simple extension of our existing `.gitlab-ci.yml`, we can utilize\n\nAppetize.io to spin up review apps of our Android application.\n\n\nIf you'd rather just skip to the end, you can see\n\n[my MR to the Gitter Android project].\n\n\n## Setting up Fastlane\n\n\nFortunately for us, _fastlane_ has integrated support for Appetize.io, so\nall\n\nthat's needed to hit Appetize is the addition of a new `lane`:\n\n\n```diff\n\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\n\nindex eb47819..f013a86 100644\n\n--- a/fastlane/Fastfile\n\n+++ b/fastlane/Fastfile\n\n@@ -32,6 +32,13 @@ platform :android do\n     gradle(task: \"test\")\n   end\n\n+  desc 'Pushes the app to Appetize and updates a review app'\n\n+  lane :review do\n\n+    appetize(api_token: ENV['APPETIZE_TOKEN'],\n\n+             path: 'app/build/outputs/apk/debug/app-debug.apk',\n\n+             platform: 'android')\n\n+  end\n\n+\n   desc \"Submit a new Internal Build to Play Store\"\n   lane :internal do\n     upload_to_play_store(track: 'internal', apk: 
'app/build/outputs/apk/release/app-release.apk')\n```\n\n\n`APPETIZE_TOKEN` is an Appetize.io API token that can be generated on the\n\n[Appetize API docs] after signing up for an account. Once we add a new job\nand\n\nstage to our `.gitlab-ci.yml`, we will be able to deploy our APK to Appetize\nand\n\nrun them in the browser!\n\n\n```diff\n\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\n\nindex d9863d7..e4d0ce3 100644\n\n--- a/.gitlab-ci.yml\n\n+++ b/.gitlab-ci.yml\n\n@@ -5,6 +5,7 @@ stages:\n   - environment\n   - build\n   - test\n+  - review\n   - internal\n   - alpha\n   - beta\n@@ -81,6 +82,16 @@ buildRelease:\n   environment:\n     name: production\n\n+deployReview:\n\n+  stage: review\n\n+  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n\n+  script:\n\n+    - bundle exec fastlane review\n\n+  only:\n\n+    - branches\n\n+  except:\n\n+    - master\n\n+\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n   stage: test\n```\n\n\nGreat! Review apps will be deployed when branches other than `master` build.\n\nUnfortunately, there is no `environment` block, so there's nothing linking\nthese\n\ndeployed review apps to GitLab. Let's fix that next.\n\n\n## Dynamic Environment URLs\n\n\nPreviously, GitLab only liked environment URLs that used pre-existing CI\n\nvariables (like `$CI_COMMT_REF_NAME`) in their definition. Since 12.9,\nhowever,\n\na [new way of defining environment urls with alternative variables exists].\n\n\nBy creating a `dotenv` file and submitting it as an `artifact` in our build,\nwe\n\ncan define custom variables to use in our environment's URL. 
As all\nAppetize.io\n\napp URLs take the pattern of `https://appetize.io.app/$PUBLIC_KEY`, where\n\n`$PUBLIC_KEY` is randomly generated when the app is created, we need to get\nthe\n\npublic key from the Appetize response in our `Fastfile`, and put it in a\n\n`dotenv` file.\n\n\n```diff\n\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\n\nindex 7b5f9d1..ae3867c 100644\n\n--- a/fastlane/Fastfile\n\n+++ b/fastlane/Fastfile\n\n@@ -13,6 +13,13 @@\n # Uncomment the line if you want fastlane to automatically update itself\n # update_fastlane\n\n+\n\n+def update_deployment_url(pub_key)\n\n+  File.open('../deploy.env', 'w') do |f|\n\n+    f.write(\"APPETIZE_PUBLIC_KEY=#{pub_key}\")\n\n+  end\n\n+end\n\n+\n default_platform(:android)\n\n platform :android do\n@@ -37,6 +44,7 @@ platform :android do\n     appetize(api_token: ENV['APPETIZE_TOKEN'],\n              path: 'app/build/outputs/apk/debug/app-debug.apk',\n              platform: 'android')\n+    update_deployment_url(lane_context[SharedValues::APPETIZE_PUBLIC_KEY])\n   end\n\n   desc \"Submit a new Internal Build to Play Store\"\n```\n\n\nWe also need to add an `environment` block to our `.gitlab-ci.yml` to\ncapture an\n\nenvironment name and URL.\n\n\n```diff\n\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\n\nindex f5a8648..c834077 100644\n\n--- a/.gitlab-ci.yml\n\n+++ b/.gitlab-ci.yml\n\n@@ -85,12 +85,18 @@ buildCreateReleaseNotes:\n deployReview:\n   stage: review\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n+  environment:\n\n+    name: review/$CI_COMMIT_REF_NAME\n\n+    url: https://appetize.io/app/$APPETIZE_PUBLIC_KEY\n   script:\n     - bundle exec fastlane review\n   only:\n     - branches\n   except:\n     - master\n+  artifacts:\n\n+    reports:\n\n+      dotenv: deploy.env\n\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n```\n\n\nOnce committed, pushed, and a pipeline runs, we should see our environment\n\ndeployed!\n\n\n![Our first review environment][first-review-app]\n\n\n## 
Optimizing Updates\n\n\nAfter running with this for a bit, we realized that we were accidentally\n\ncreating a new app on Appetize.io with every new build! Their docs\n\n[specify how to update existing apps], so we went about seeing if we could\n\nsmartly update existing environments.\n\n\nSpoiler alert: We could.\n\n\nFirst, we need to save the public key granted to us by Appetize.io\nsomewhere. We\n\ndecided to put it in a JSON file and save that as an artifact of the build.\n\nFortunately, the `Fastfile` is just ruby, which allows us to quickly write\nit\n\nout to a file with a few lines of code, as well as attempt to fetch the\nartifact\n\nfor the last build of the current branch.\n\n\n```diff\n\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\n\nindex ae3867c..61e9226 100644\n\n--- a/fastlane/Fastfile\n\n+++ b/fastlane/Fastfile\n\n@@ -13,8 +13,32 @@\n # Uncomment the line if you want fastlane to automatically update itself\n # update_fastlane\n\n+require 'net/http'\n\n+require 'json'\n\n+\n\n+GITLAB_TOKEN = ENV['PRIVATE_TOKEN']\n\n+PROJECT_ID = ENV['CI_PROJECT_ID']\n\n+REF = ENV['CI_COMMIT_REF_NAME']\n\n+JOB = ENV['CI_JOB_NAME']\n\n+API_ROOT = ENV['CI_API_V4_URL']\n\n+\n\n+def public_key\n\n+  uri =\nURI(\"#{API_ROOT}/projects/#{PROJECT_ID}/jobs/artifacts/#{REF}/raw/appetize-information.json?job=#{JOB}\")\n\n+  http = Net::HTTP.new(uri.host, uri.port)\n\n+  http.use_ssl = true\n\n+  req = Net::HTTP::Get.new(uri)\n\n+  req['PRIVATE-TOKEN'] = GITLAB_TOKEN\n\n+  response = http.request(req)\n\n+  return '' if response.code.equal?('404')\n\n+\n\n+  appetize_info = JSON.parse(response.body)\n\n+  appetize_info['publicKey']\n\n+end\n\n def update_deployment_url(pub_key)\n+  File.open('../appetize-information.json', 'w') do |f|\n\n+    f.write(JSON.generate(publicKey: pub_key))\n\n+  end\n   File.open('../deploy.env', 'w') do |f|\n     f.write(\"APPETIZE_PUBLIC_KEY=#{pub_key}\")\n   end\n@@ -42,6 +66,7 @@ platform :android do\n   desc 'Pushes the app to Appetize 
and updates a review app'\n   lane :review do\n     appetize(api_token: ENV['APPETIZE_TOKEN'],\n+             public_key: public_key,\n              path: 'app/build/outputs/apk/debug/app-debug.apk',\n              platform: 'android')\n     update_deployment_url(lane_context[SharedValues::APPETIZE_PUBLIC_KEY])\n```\n\n\nWhen we go to deploy our app to Appetize, we hit the [Jobs API] to see if we\n\nhave a public key for this branch. If the API returns a `404`, we know we\nare\n\nbuilding a fresh branch and return an empty string, else we parse the JSON\nand\n\nreturn our public key. The [Fastlane docs] state the `appetize` action can\ntake\n\na `public_key` to update an existing app. Here, `''` is considered the same\nas\n\n_not_ providing a public key, so a new application is still deployed as we\nexpect.\n\n\n**NOTE:** If you've read the `diff` closely, you'll notice the usage of an\n\nenvironment variable called `PRIVATE_TOKEN`. This is a GitLab private token\n\ncreated with the `read_api` scope and injected into our build as an\nenvironment\n\nvariable. This is required to authenticate with the GitLab API and fetch\n\nartifacts.\n\n\nOnce we update `.gitlab-ci.yml` to save the new `appetize-information.json`\nfile\n\nas an artifact, later builds on the same branch will be smart and update the\n\nexisting Appetize app!\n\n\n```diff\n\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\n\nindex c834077..54cf3f6 100644\n\n--- a/.gitlab-ci.yml\n\n+++ b/.gitlab-ci.yml\n\n@@ -95,6 +95,8 @@ deployReview:\n   except:\n     - master\n   artifacts:\n+    paths:\n\n+      - appetize-information.json\n     reports:\n       dotenv: deploy.env\n```\n\n\n## Cleaning up\n\n\nAll that's left is to delete old apps from Appetize once we don't need them\n\nanymore. 
We can do that by leveraging `on_stop` and creating a `stop` job\nthat\n\nwill delete our app from Appetize.io\n\n\n```diff\n\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\n\nindex 54cf3f6..f6ecf7e 100644\n\n--- a/.gitlab-ci.yml\n\n+++ b/.gitlab-ci.yml\n\n@@ -10,6 +10,7 @@ stages:\n   - alpha\n   - beta\n   - production\n+  - stop\n\n\n .updateContainerJob:\n@@ -88,6 +89,7 @@ deployReview:\n   environment:\n     name: review/$CI_COMMIT_REF_NAME\n     url: https://appetize.io/app/$APPETIZE_PUBLIC_KEY\n+    on_stop: stopReview\n   script:\n     - bundle exec fastlane review\n   only:\n@@ -100,6 +102,22 @@ deployReview:\n     reports:\n       dotenv: deploy.env\n\n+stopReview:\n\n+  stage: stop\n\n+  environment:\n\n+    name: review/$CI_COMMIT_REF_NAME\n\n+    action: stop\n\n+  variables:\n\n+    GIT_STRATEGY: none\n\n+  when: manual\n\n+  only:\n\n+    - branches\n\n+  except:\n\n+    - master\n\n+  script:\n\n+    - apt-get -y update && apt-get -y upgrade && apt-get -y install jq curl\n\n+    - curl --request DELETE\nhttps://$APPETIZE_TOKEN@api.appetize.io/v1/apps/`jq -r '.publicKey' \u003C\nappetize-information.json`\n\n+\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n   stage: test\n```\n\n\nOnce your MR is merged and your branch is deleted, the `stopReview` job\nruns,\n\ncalling the [`DELETE` endpoint of the Appetize.io API] with the public key\nthat\n\nis contained in `appetize-information.json`. We don't need to fetch\n\n`appetize-information.json` because the artifact is already present in our\nbuild\n\ncontext. This is because the `stop` stage happens _after_ the `review`\nstage.\n\n\n![A merge request with a deployed review app][merge-request-with-review-app]\n\n\n## Conclusion\n\n\nThanks to some integration with _fastlane_ and the addition of a couple\n\nenvironment variables, having the ability to create review apps for an\nAndroid\n\nproject was surpsingly simple. 
GitLab's review apps are not _just_ for\nweb-based\n\nprojects, even though it may take a little tinkering to get working.\nAppetize.io\n\nalso supports iOS applications, so all mobile native applications can be\nturned\n\ninto review apps. I would love to see this strategy be applied to a React\nNative\n\nproject as well!\n\n\n[previous look at gitlab and _fastlane_]:\n/blog/android-publishing-with-gitlab-and-fastlane/\n\n[my mr to the gitter android project]:\nhttps://gitlab.com/gitlab-org/gitter/gitter-android-app/-/merge_requests/167\n\n[review apps]: https://docs.gitlab.com/ee/ci/review_apps/#review-apps\n\n[appetize.io]: https://appetize.io\n\n[appetize api docs]: https://appetize.io/docs#request-api-token\n\n[new way of defining environment urls with alternative variables exists]:\nhttps://docs.gitlab.com/ee/ci/environments/index.html#set-dynamic-environment-urls-after-a-job-finishes\n\n[first-review-app]:\n/images/blogimages/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io/first-review-app.png\n\n[specify how to update existing apps]:\nhttps://appetize.io/docs#updating-apps\n\n[jobs api]:\nhttps://docs.gitlab.com/ee/api/jobs.html#download-a-single-artifact-file-from-specific-tag-or-branch\n\n[fastlane docs]: https://docs.fastlane.tools/actions/appetize/\n\n[`delete` endpoint of the appetize.io api]:\nhttps://appetize.io/docs#deleting-apps\n\n[merge-request-with-review-app]:\n/images/blogimages/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io/merge-request-with-review-app.png\n",[9,232,695,917],{"slug":3310,"featured":6,"template":700},"how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io","content:en-us:blog:how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io.yml","How To Create Review Apps For Android With Gitlab Fastlane And Appetize Dot 
Io","en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io.yml","en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io",{"_path":3316,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3317,"content":3323,"config":3328,"_id":3330,"_type":14,"title":3331,"_source":16,"_file":3332,"_stem":3333,"_extension":19},"/en-us/blog/how-to-deploy-react-to-amazon-s3",{"title":3318,"description":3319,"ogTitle":3318,"ogDescription":3319,"noIndex":6,"ogImage":3320,"ogUrl":3321,"ogSiteName":685,"ogType":686,"canonicalUrls":3321,"schema":3322},"How to deploy a React application to Amazon S3 using GitLab CI/CD","Follow this guide to use OpenID Connect to connect to AWS and deploy a React application to Amazon S3.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663291/Blog/Hero%20Images/cover1.jpg","https://about.gitlab.com/blog/how-to-deploy-react-to-amazon-s3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a React application to Amazon S3 using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeremy Wagner\"}],\n        \"datePublished\": \"2023-03-01\",\n      }",{"title":3318,"description":3319,"authors":3324,"heroImage":3320,"date":3325,"body":3326,"category":718,"tags":3327},[3204],"2023-03-01","Amazon S3 has a Static Website Hosting feature which allows you to host a\nstatic website directly from an S3 bucket. When you \n\nhost your website on S3, your website content is stored in the S3 bucket and\nserved directly to your users, without the need \n\nfor additional resources. 
Combine this with Amazon CloudFront and you will\nhave a cost-effective and scalable solution for \n\nhosting static websites – making it a popular choice for single-page\napplications.\n\n\nIn this post, I will walk you through setting up your Amazon S3 bucket,\nsetting up OpenID Connect ([OIDC](https://openid.net/connect/)) in AWS, and\ndeploying your application \n\nto your Amazon S3 bucket using a GitLab [CI/CD](/topics/ci-cd/) pipeline.\n\n\nBy the end of this post, you will have a [CI/CD\npipeline](/blog/how-to-keep-up-with-ci-cd-best-practices/) built\nin GitLab that automatically deploys to your Amazon S3 bucket. Let's dive\nin.\n\n\n## Prerequisites\n\n\nFor this guide you will need the following:\n\n\n- [Node.js](https://nodejs.org/en/) >= 14.0.0 and npm >= 5.6 installed on\nyour system\n\n- [Git](https://git-scm.com/) installed on your system\n\n- A [GitLab](https://gitlab.com/-/trial_registrations/new) account\n\n- An [AWS](https://aws.amazon.com/free/) account\n\n\n[A previous\ntutorial](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/)\ndemonstrated how to create a new React \n\napplication, run unit tests as part of the CI process in GitLab, and output\nthe test results and code coverage into the pipeline. This post continues\nwhere that project left off, so to follow along you can fork [this\nproject](https://gitlab.com/guided-explorations/engineering-tutorials/react-unit-testing)\nor complete the guide in the linked post.\n\n\n## Configure your Amazon S3 bucket\n\n\nYou'll need to configure your Amazon S3 bucket so let's do that first.\n\n\n### Create your bucket\n\n\nAfter you log in to your AWS account, search for S3 using the search bar and\nselect the S3 service. This will open the S3 service home page.\n\n\nRight away, you should see the option to create a bucket. The bucket is\nwhere you are going to store your built React application. 
Click the\n**Create bucket** button to continue.\n\n\n![Create S3\nbucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/create_bucket.png){:\n.shadow}\n\n\nGive your bucket a name, select your region, leave the rest of the settings\nas default (we’ll come back to these later), and continue by \n\nclicking the **Create bucket** button. When naming your bucket, it’s\nimportant to remember that your bucket name must be unique and follow the \n\nbucket naming rules. I named mine `jw-gl-react`.\n\n\nAfter creating your bucket, you should be taken to a list of your buckets as\nshown below.\n\n\n![S3 bucket\nlist](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/bucket_list.png){:\n.shadow}\n\n\n### Configure static website hosting\n\n\nThe next step is to configure static website hosting. Open your S3 bucket by\nclicking into the bucket name. Select the **Properties** tab and \n\nscroll to the bottom to find the static website hosting option.\n\n\n![static hosting\nbutton](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_1.png){:\n.shadow}\n\n\nClick **Edit** and then enable static website hosting. For the **Index** and\n**Error** document, enter `index.html` and then click **Save changes**.\n\n\n![edit static\nhosting](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/static_hosting_2.png){:\n.shadow}\n\n\n### Set up permissions\n\n\nNow that you have enabled static website hosting, you need to update your\npermissions so the public can visit your website. 
Return to your bucket and\nselect the **Permissions** tab.\n\n\nUnder **Block public access (bucket settings)**, click **Edit** and uncheck\n**Block all public access** and continue to **Save changes**.\n\n\n![block public\naccess](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_1.png){:\n.shadow}\n\n\nYour page should now look this this:\n\n\n![saved blocked public\naccess](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_2.png){:\n.shadow}\n\n\nNow, you need to edit the Bucket Policy. Click the **Edit** button in the\n**Bucket Policy** section. Paste the following code into your new policy:\n\n\n```javascript\n\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"PublicReadGetObject\",\n            \"Effect\": \"Allow\",\n            \"Principal\": \"*\",\n            \"Action\": \"s3:GetObject\",\n            \"Resource\": \"arn:aws:s3:::jw-gl-react/*\"\n        }\n    ]\n}\n\n```\n\n\nReplace `jw-gl-react` on the resource property with the name of your bucket\nand **Save changes**.\n\n\nYour bucket should now look like this:\n\n\n![publicly accessible\nbucket](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/block_access_3.png){:\n.shadow}\n\n\n## Manually upload your React application\n\n\nNow, let’s build your React application and manually publish it to your S3\nbucket. 
\n\n\nTo build the application, make sure your project is cloned to your local\nmachine and run the following command in your terminal inside of your \n\nrepository directory:\n\n\n```\n\nnpm run build\n\n```\n\n\nThis will create a build folder inside of your repository directory.\n\n\nInside of your bucket, click the **Upload** button.\n\n\n![manual bucket\nupload](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_1.png){:\n.shadow}\n\n\nDrag the contents of your newly created build folder (not the folder itself)\ninto the upload area. This will \n\nupload the contents of your application into your S3 bucket. Make sure to\nclick **Upload** at the bottom of the page to start the upload.\n\n\nNow return to your bucket **Properties** tab and scroll to the bottom to\nfind the URL of your static website.\n\n\n![static website\nurl](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/upload_2.png){:\n.shadow}\n\n\nClick the link and you should see your built React application open in your\nbrowser.\n\n\n![deployed\napp](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/manual_deploy.png){:\n.shadow}\n\n\n## Set up OpenID Connect in AWS\n\n\nTo deploy to your S3 Bucket from GitLab, we’re going to use a GitLab CI/CD\njob to receive temporary credentials \n\nfrom AWS without needing to store secrets. To do this, we’re going to\nconfigure OIDC for ID federation \n\nbetween GitLab and AWS. We’ll be following the [related GitLab\ndocumentation](https://docs.gitlab.com/ee/ci/cloud_services/aws/).\n\n\n### Add the identity provider\n\n\nThe first step is going to be adding GitLab as an identity and access\nmanagement (IAM) OIDC provider in AWS. 
AWS has instructions located\n[here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc.html), \n\nbut I will walk through it step by step.\n\n\nOpen the IAM console inside of AWS.\n\n\n![iam\nsearch](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_1.png){:\n.shadow}\n\n\nOn the left navigation pane, under **Access management** choose **Identity\nproviders** and then choose **Add provider**. \n\nFor provider type, select **OpenID Connect**.\n\n\nFor **Provider URL**, enter the address of your GitLab instance, such as\n`https://gitlab.com` or `https://gitlab.example.com`.\n\n\nFor **Audience**, enter something that is generic and specific to your\napplication. In my case, I'm going to \n\nenter `react_s3_gl`. To prevent confused deputy attacks, it's best to make\nthis something that is not easy to guess. Take a note of \n\nthis value, you will use it to set the `ID_TOKEN` in your `.gitlab-ci.yml`\nfile.\n\n\nAfter entering the **Provider URL**, click **Get thumbprint** to verify the\nserver certificate of your IdP. After this, go \n\nahead and choose **Add provider** to finish up.\n\n\n### Create the permissions policy\n\n\nAfter you create the identity provider, you need to create a permissions\npolicy.\n\n\nFrom the IAM dashboard, under **Access management** select **Policies** and\nthen **Create policy**. 
\n\nSelect the JSON tab and paste the following policy replacing `jw-gl-react`\non the resource line with your bucket name.\n\n\n```javascript\n\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\"s3:ListBucket\"],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react\"]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\",\n        \"s3:GetObject\",\n        \"s3:DeleteObject\"\n      ],\n      \"Resource\": [\"arn:aws:s3:::jw-gl-react/*\"]\n    }\n  ]\n}\n\n```\n\n\nSelect the **Next: Tags** button, add any tags you want, and then select the\n**Next: Review** button. \n\nEnter a name for your policy and finish up by creating the policy. \n\n\n### Configure the role\n\n\nNow it’s time to add the role. From the IAM dashboard, under **Access\nmanagement** select **Roles** \n\nand then select **Create role**. Select **Web identity**.\n\n\nIn the **Web identity** section, select the identity provider you created\nearlier. For the \n\n**Audience**, select the audience you created earlier. Select the **Next**\nbutton to continue.\n\n\nIf you wanted to limit authorization to a specific group, project, branch,\nor tag, you could create a **Custom trust policy** \n\ninstead of a **Web identity**. Since I will be deleting these resources\nafter the tutorial, I'm going to keep it simple. For a \n\nfull list of supported filterting types, see the [GitLab\ndocumentation](https://docs.gitlab.com/ee/ci/cloud_services/index.html#configure-a-conditional-role-with-oidc-claims).\n\n\n![web\nidentity](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_2.png){:\n.shadow}\n\n\nDuring the **Add permissions** step, select the policy you created and\nselect **Next** to continue. Give your role a name and click **Create\nrole**.\n\n\nOpen the Role you just created. In the summary section, find the Amazon\nResource Name (ARN) and save it somewhere secure. 
You will use this in your\npipeline.\n\n\n![role](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/iam_3.png){:\n.shadow}\n\n\n## Deploy to your Amazon S3 bucket using a GitLab CI/CD pipeline\n\n\nInside of your project, create two [CI/CD\nvariables](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui).\nThe first variable should be named `ROLE_ARN`. For the value, paste the ARN\nof the \n\nrole you just created. The second variable should be named `S3_BUCKET`. For\nthe value, paste the name of the S3 bucket you created \n\nearlier in this post.\n\n\nI have chosen to mask my variables for an extra layer of security.\n\n\n### Retrieve your temporary credentials\n\n\nInside of your `.gitlab-ci.yml` file, paste the following code:\n\n\n```\n\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n```\n\n\nThis is going to use the the AWS Security Token Service to generate\ntemporary (_3,600 seconds_) credentials utilizing the OIDC role you created\nearlier.\n\n\n### Create the deploy job\n\n\nNow, let's add a build and deploy job to build your application and deploy\nit to your S3 bucket.\n\n\nFirst, update the stages in your `.gitlab-ci.yml` file to include a `build`\nand `deploy` stage as shown below:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n```\n\n\nNext, let's add a job to build your application. 
Paste the following code in\nyour `.gitlab-ci.yml` file:\n\n\n```\n\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\n\nThis is going to run `npm run build` if the change occurs on the `main`\nbranch and upload the build directory as an \n\nartifact to be used during the next step.\n\n\nNext, let's add a job to actually deploy to your S3 bucket. Paste the\nfollowing code in your `.gitlab-ci.yml` file:\n\n\n```\n\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\n\nThis uses [YAML\nanchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#yaml-anchors-for-scripts)\nto run the `assume_role` script, \n\nand then uses the `aws cli` to upload your build artifact to the bucket you\ndefined as a variable. This job also only runs if the change occurs \n\non the `main` branch.\n\n\nMake sure the `aud` value matches the value you entered for your audience\nwhen you setup the identity provider. 
In my case, I entered `react-s3_gl`.\n\n\nYour complete `.gitlab-ci.yml` file should look like this:\n\n\n```\n\nstages:\n  - build\n  - test\n  - deploy\n\n.assume_role: &assume_role\n    - >\n      STS=($(aws sts assume-role-with-web-identity\n      --role-arn ${ROLE_ARN}\n      --role-session-name \"GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}\"\n      --web-identity-token $ID_TOKEN\n      --duration-seconds 3600\n      --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]'\n      --output text))\n    - export AWS_ACCESS_KEY_ID=\"${STS[0]}\"\n    - export AWS_SECRET_ACCESS_KEY=\"${STS[1]}\"\n    - export AWS_SESSION_TOKEN=\"${STS[2]}\"\n  \nunit test:\n  image: node:latest\n  stage: test\n  before_script:\n    - npm install\n  script:\n    - npm run test:ci\n  coverage: /All files[^|]*\\|[^|]*\\s+([\\d\\.]+)/\n  artifacts:\n    paths:\n      - coverage/\n    when: always\n    reports:\n      junit:\n        - junit.xml\n\nbuild artifact:\n  stage: build\n  image: node:latest\n  before_script:\n    - npm install\n  script:\n    - npm run build\n  artifacts:\n    paths:\n      - build/\n    when: always\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n\n\ndeploy s3:\n  stage: deploy\n  image:\n    name: amazon/aws-cli:latest\n    entrypoint: \n      - '/usr/bin/env'\n  id_tokens:\n      ID_TOKEN:\n        aud: react_s3_gl\n  script:\n    - *assume_role\n    - aws s3 sync build/ s3://$S3_BUCKET\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == \"main\"'\n      when: always\n```\n\n\n### Make a change and test your pipeline\n\n\nTo test your pipeline, inside of `App.js`, change this line `Edit\n\u003Ccode>src/App.js\u003C/code> and save to reload.` to \n\n`This was deployed from GitLab!` and commit your changes to the `main`\nbranch. 
The pipeline should kick off and when \n\nit finishes successfully you should see your updated application at the URL\nof your static website.\n\n\n![updated\napp](https://about.gitlab.com/images/blogimages/2023-02-10-how-to-deploy-react-to-amazon-s3/auto_deploy.png){:\n.shadow}\n\n\nYou now have a CI/CD pipeline built in GitLab that receives temporary\ncredentials from AWS using OIDC and \n\nautomatically deploys to your Amazon S3 bucket. To take it a step further,\nyou can [secure your\napplication](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html) \n\nwith GitLab's built-in security tools.\n\n\nAll code for this project can be found\n[here](https://gitlab.com/guided-explorations/engineering-tutorials/react-s3).\n\n\nCover image by [Lucas van\nOor](https://unsplash.com/@switch_dtp_fotografie?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/bucket?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n\n\n## Related posts and documentation\n\n- [How to automate testing for a React application with\nGitLab](/blog/how-to-automate-testing-for-a-react-application-with-gitlab/)\n\n- [How to deploy AWS with GitLab](/blog/deploy-aws/)\n\n- [Deploy to AWS from GitLab\nCI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/)\n\n- [Configure OpenID Connect in AWS to retrieve temporary\ncredentials](https://docs.gitlab.com/ee/ci/cloud_services/aws/)\n\n- [Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps\nplatform](https://about.gitlab.com/blog/oidc/)\n",[721,9],{"slug":3329,"featured":6,"template":700},"how-to-deploy-react-to-amazon-s3","content:en-us:blog:how-to-deploy-react-to-amazon-s3.yml","How To Deploy React To Amazon 
S3","en-us/blog/how-to-deploy-react-to-amazon-s3.yml","en-us/blog/how-to-deploy-react-to-amazon-s3",{"_path":3335,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3336,"content":3342,"config":3347,"_id":3349,"_type":14,"title":3350,"_source":16,"_file":3351,"_stem":3352,"_extension":19},"/en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces",{"title":3337,"description":3338,"ogTitle":3337,"ogDescription":3338,"noIndex":6,"ogImage":3339,"ogUrl":3340,"ogSiteName":685,"ogType":686,"canonicalUrls":3340,"schema":3341},"How to easily launch GitLab through cloud marketplaces","Bitnami makes publishing GitLab into Azure Marketplace simple.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670001/Blog/Hero%20Images/bitnami-gitlab-cloud.png","https://about.gitlab.com/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to easily launch GitLab through cloud marketplaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miranda Carter\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-09-30\",\n      }",{"title":3337,"description":3338,"authors":3343,"heroImage":3339,"date":2212,"body":3345,"category":1062,"tags":3346},[3344,2541],"Miranda Carter","\n\nToday almost every enterprise in the world moved at least some of its mission-critical workloads into public cloud environments, making it increasingly important that customers can easily deploy and manage their software in any cloud. 
All of the major cloud vendors have introduced marketplaces where customers can quickly deploy applications into their cloud computing infrastructure.\n\n[Bitnami](https://bitnami.com/), now part of VMware, has long partnered with the leading cloud vendors to provide a library of open source software in their marketplaces that is always up-to-date, packaged using best practices, and completely free to end users. Bitnami and GitLab worked together for years on publishing [GitLab Community Edition (CE)](/install/?version=ce) as part of this library.\n\n### The Bitnami and GitLab partnership advantage\n\nGitLab CE provides value to millions of organizations and community contributors, and this has only been enhanced by our partnership with Bitnami. By taking the GitLab CE open [source code](/solutions/source-code-management/) and packaging it in a way that is always up-to-date and easy to use out-of-the-box on almost any cloud platform, Bitnami has helped make GitLab CE accessible to hundreds of thousands of users.\n\nThe GitLab team is working with Bitnami to eliminate the complexity of packaging our enterprise application for multiple cloud marketplaces, in the same way they do for GitLab CE. This partnership enables the various marketplaces to receive timely updates of the GitLab Enterprise Edition (EE) software packages whenever there is a security issue or dependency update.\n\n### GitLab Enterprise Edition packaged by Bitnami is available on Microsoft Azure marketplace\n\nToday, we are pleased to announce that our partnership with Bitnami has helped make [GitLab EE](/install/) available in the [Microsoft Azure marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/gitlabinc1586447921813.gitlabee?tab=Overview). 
GitLab EE customers will be able to seamlessly deploy and use the application in these environments thanks to Bitnami’s expertise in packaging and publishing software for the public cloud.\n\nExisting customers can bring their licenses and apply them to GitLab EE in any of these environments. GitLab EE is also published by Bitnami in the VMware Cloud marketplace.\n\n### Software support for marketplace packages\n\nCustomers who deploy GitLab EE packaged by Bitnami will enjoy the same enterprise-level support that GitLab customers receive in any other supported environment. Customers who have deployed GitLab software into the cloud infrastructure already through the cloud marketplace must follow the normal GitLab software upgrade process to address any critical issues and vulnerabilities.\n\n### About the authors\n\n_[Vick Kelkar](/company/team/#vkelkar) is on Alliances team at GitLab. He has experience developing and running products for container orchestrators like Cloud Foundry and Kubernetes._\n\n_Miranda Carter has been part of the Bitnami team for over six years, and came to VMware as part of the VMware acquisition last year. 
Miranda is now a Program Manager at VMware and focuses on supporting Tanzu Application Catalog and supporting ISVs whenever possible._\n",[999,9,721,232],{"slug":3348,"featured":6,"template":700},"how-to-easily-launch-gitlab-through-cloud-marketplaces","content:en-us:blog:how-to-easily-launch-gitlab-through-cloud-marketplaces.yml","How To Easily Launch Gitlab Through Cloud Marketplaces","en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces.yml","en-us/blog/how-to-easily-launch-gitlab-through-cloud-marketplaces",{"_path":3354,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3355,"content":3361,"config":3366,"_id":3368,"_type":14,"title":3369,"_source":16,"_file":3370,"_stem":3371,"_extension":19},"/en-us/blog/how-to-include-file-references-in-your-ci-cd-components",{"title":3356,"description":3357,"ogTitle":3356,"ogDescription":3357,"noIndex":6,"ogImage":3358,"ogUrl":3359,"ogSiteName":685,"ogType":686,"canonicalUrls":3359,"schema":3360},"How to include file references in your CI/CD components","Learn how to include scripts and dependencies in your CI/CD components to minimize duplications and simplify maintenance. 
This tutorial takes you step-by-step through the process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664595/Blog/Hero%20Images/blog-image-template-1800x945__9_.png","https://about.gitlab.com/blog/how-to-include-file-references-in-your-ci-cd-components","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to include file references in your CI/CD components\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-10-16\",\n      }",{"title":3356,"description":3357,"authors":3362,"heroImage":3358,"date":3363,"body":3364,"category":693,"tags":3365},[1835],"2024-10-16","I’m frequently asked whether included CI/CD components can reference additional files stored outside of the pipeline repository. While including components in your configuration is straightforward since they’re just YAML, many users want to know if those included components can access and execute additional files referenced by the components, like shell scripts or other dependencies. \n\nThis challenge has been a common topic of discussion in threads across the [GitLab Forum](https://forum.gitlab.com/t/gitlab-ci-includes-a-file-from-another-project-that-executes-a-script-file/111698) and [Reddit](https://www.reddit.com/r/gitlab/comments/18ma13x/gitlab_components_question/).\n\nNow for the good news: CI/CD components not only allow you to reuse pipeline configurations, saving time and effort, but you can also go a step further. With the new [CI/CD Steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/), you can directly reuse centralized automation scripts and dependencies in your pipelines. 
You'll gain even greater flexibility, making your pipelines more powerful and adaptable than ever.\n\nBy storing your scripts in a central location and wrapping them in CI/CD Steps, you can easily call these steps from your CI/CD components. This eliminates the need to duplicate scripts across multiple repositories and CI/CD configurations, streamlining your workflow and reducing redundancy.\n\nBefore we dive into the step-by-step guide, let’s briefly explore what CI/CD components and CI/CD Steps are.\n\n## What are CI/CD components?\n\n[CI/CD components](https://docs.gitlab.com/ee/ci/components/) are reusable units of pipeline configurations that get included in a pipeline when it’s created. The components bring additional jobs into the pipeline, however they can’t bring additional files as such reusable scripts. \n\n## What are CI/CD Steps?\n\n[CI/CD Steps](https://docs.gitlab.com/ee/ci/steps/) are reusable units of a job. Each step defines structured inputs and outputs that can be consumed by other steps. Steps can come from local files, GitLab.com repositories, or any other Git source. Steps offer a structured alternative to shell scripts for running jobs. They are modular, can be composed, tested, and easily reused, providing greater flexibility and maintainability.\n\n## What are the differences between CI/CD Steps and CI/CD components?\n\n- Component and step definitions look very similar but they take effect at different phases in pipeline execution. \n\n- Components are used when a pipeline is created while steps are used when individual jobs are running. \n\n- When a step is running, the whole repository is being downloaded into the job environment along with extra files. 
\n\n## A step-by-step guide\n\nHere is how CI/CD Steps and Components work together to access additional files.\n\n![CI/CD Steps flow diagram](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/steps-diagram-for-blog.png)\n\nThis diagram illustrates the process flow: Jobs defined within components are imported into the pipeline configuration (`.gitlab-ci.yml`) when the pipeline is created. During the pipeline's execution, a job’s steps are executed, and the entire Git repository is downloaded to the [Step runner](https://docs.gitlab.com/ee/ci/steps/#using-steps) within the job’s context. This ensures that references to dependencies function correctly.\n\n**1\\. Define a component with `run` keyword that runs CI/CD Steps**\n\nRun is a new keyword that supports running steps, see the example code below. You can use [this guide](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/) to learn more on how to create Components. \n\n![template-yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.22.00.png)\n\n**2\\. Create a `step.yml` file in the project where your scripts and dependencies are located.**\n\nIn this code example, format.sh exists in the same directory as the `step.yml`. \n\n![step.yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.23.52.png)\n\n While the job is running, the Step runner will download the entire Git repository where the step is defined. The `${{ step_dir }}` step expression references the directory of the locally cached step files, allowing you to access other files from the repository. In the example above, the “format” step invokes the format.sh script.\n\n**3\\. Make sure that any files accessed by the step are located in the same repository as the `step.yml` file.**\n\n**4\\. 
Include the component in your CI/CD configuration.**\n\nSee this example code:\n\n![.gitlab-ci.yml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675829/Blog/Content%20Images/Screenshot_2024-10-13_at_8.26.22.png)\n\nCode example: You can find the entire code demonstrated in this blog in this [GitLab Group](https://gitlab.com/gitlab-da/use-cases/ci-steps). \n\n**Important note:** The CI/CD Steps feature is currently [Experimental](https://docs.gitlab.com/ee/policy/experiment-beta-support.html#experiment), and the syntax may change as we continue to iterate and refine it based on user feedback. Any feedback should be provided via [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/493694).\n\n## Learn more\n\n- Watch [this walkthrough](https://youtu.be/qxTbeYXEQLM) by [Joe Burnett](https://about.gitlab.com/company/team/#josephburnett), principal engineer at GitLab, as he demonstrates the example discussed in the blog post.\n\n- [Introducing CI/CD Steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n\n- [Introducing CI/CD components](https://about.gitlab.com/blog/introducing-ci-components/)",[9,721,695,693],{"slug":3367,"featured":6,"template":700},"how-to-include-file-references-in-your-ci-cd-components","content:en-us:blog:how-to-include-file-references-in-your-ci-cd-components.yml","How To Include File References In Your Ci Cd Components","en-us/blog/how-to-include-file-references-in-your-ci-cd-components.yml","en-us/blog/how-to-include-file-references-in-your-ci-cd-components",{"_path":3373,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3374,"content":3379,"config":3385,"_id":3387,"_type":14,"title":3388,"_source":16,"_file":3389,"_stem":3390,"_extension":19},"/en-us/blog/how-to-learn-ci-cd-fast",{"title":3375,"description":3376,"ogTitle":3375,"ogDescription":3376,"noIndex":6,"ogImage":1582,"ogUrl":3377,"ogSiteName":685,"ogType":686,"canonicalUrls":3377,"schema":3378},"How to 
learn CI/CD fast","Continuous integration and continuous delivery (CI/CD) are critical to faster software releases and it's less complicated than it seems to get rolling. Here's how to start fast with CI/CD.","https://about.gitlab.com/blog/how-to-learn-ci-cd-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to learn CI/CD fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Vanbuskirk\"}],\n        \"datePublished\": \"2022-04-13\",\n      }",{"title":3375,"description":3376,"authors":3380,"heroImage":1582,"date":3382,"body":3383,"category":741,"tags":3384},[3381],"Mike Vanbuskirk","2022-04-13","\nContinuous integration and continuous delivery (CI/CD) have become the keystone technical architecture of successful DevOps implementations. CI/CD has a reputation for being complex and hard to achieve, but that doesn’t have to be the case. Modern tools enable teams to get started with minimal configuration and infrastructure management. Here’s how you can “start fast” with CI/CD and get some quick, demonstrable performance wins for your DevOps team.\n\n## What does CI/CD mean?\n\n[CI/CD](/topics/ci-cd/) refers to a system or systems that enable software development to have continuous integration and continuous delivery capabilities. The architecture underpinning CI/CD is typically referred to as a pipeline, as software progresses through various stages akin to flowing through a pipe. What does [continuous integration and continuous delivery](/blog/basics-of-gitlab-ci-updated/) actually mean? 
Taking some time to explore the more granular details will help us set some goals for getting a fast start with CI/CD.\n\nStarting on the left side of the pipeline, continuous integration encompasses a variety of automation that occurs over the course of multiple stages, designed to test and provide quick feedback on different aspects of code quality, functionality, and security. CI testing can run the gamut from unit tests and linting run locally on a developer workstation, to full integration testing suites and static analysis. Anyone that's ever seen a small code change cause a significant outage or breakage upon reaching production knows the value of automated, repeatable testing, and the downsides of depending on manual testing.\n\nOnce a code change has passed testing, it's time to deploy. In legacy environments, system administrators and operations staff often had to manually transfer and install updates, and reboot servers to deploy new features. This type of manual work simply does not scale to the demands of the modern application ecosystem, and is error prone to boot. With continuous delivery, that code is automatically deployed to servers in a testable and deterministic way. Code [can be staged in environments](/blog/ci-deployment-and-environments/) with less strict SLAs, such as development, staging, and QA. Once it has been verified, the new features can be launched as production workloads. In some environments, \"continuous delivery\" becomes \"continuous deployment\", in which comprehensive testing automatically deploys new code through to production without human intervention.\n\nWhat's the ultimate goal of all this automation? It's what makes a successful software organization: faster deployment cadence.\n\n## Getting started with CI/CD\n\nWith a little background established, now it's time to focus on the key objective: getting up and running quickly. 
The primary goal here is to get a quick win with a CI/CD implementation to improve deployment velocity, and hopefully drive a larger effort towards standardizing on widespread and effective CI/CD usage.\n\nGetting started with CI/CD can appear daunting. There is a wealth of tools, services, and platforms available to provide specific functionality and end-to-end solutions for CI/CD. Some options like [Jenkins](https://www.jenkins.io) are self-managed; others, including GitLab, have a holistic CI/CD pipeline with integrated version control.\n\n## Build your pipeline\n\nRealistically, there is no magic bullet configuration for CI/CD. Each implementation will be highly dependent on a number of factors: the type of application being deployed, the size and skillset of the engineering team/s, the business requirements, and the scale of the application itself. The design and implementation considerations for an application that might see 100 users per day is vastly different from one that sees 1 million. The same holds true for CI/CD.\n\nBelow are 5 high-level strategies for tackling that first CI/CD pipeline:\n\n### 1. Start small\n\nDon't try to fix everything at once. Attempts to refactor an entire codebase or infrastructure will be a complex process, typically involving multiple layers of approval, discussion, planning, and possible pushback from dependent teams. It's much easier to choose a small subset of the application infrastructure to improve.\n\n### 2. Catch low-hanging fruit early\n\nSome of the simplest and easiest to detect (and fix) errors can end up causing the biggest problems if they make it into production workloads. However, it might not make sense to add unnecessary steps or complexity to the CI/CD pipeline. In this instance, it’s a good choice to configure some automatic testing to take place on developer machines before code is committed. Most Git DVCS providers, including GitLab, allow users to deploy pre-commit hooks. 
Pre-commit hooks are typically some type of script or automation that are triggered when specific actions occur. For example, when a developer initiates a new commit, a pre-commit hook might check that the code conforms to syntactical and structural standards, and is free from basic syntax errors. Other pre-commit hooks might ensure that unit tests are run successfully before a commit is allowed to proceed into the larger pipeline.\n\n### 3. Make security a part of CI/CD\n\nTests shouldn't just be limited to syntax and logic. Catching security issues early in the software development lifecycle (SDLC) means they are much easier, cheaper, and safer to fix. Adding some basic [static code analysis tools](https://docs.gitlab.com/user/application_security/sast/customize_rulesets/) and dependency checkers can vastly improve the security posture of an application by providing fast feedback and early detection of common security problems and potential vulnerabilities.\n\n### 4. Tailor tests to common issues\n\nMost engineering teams that rely on legacy deployment methodologies should be able to easily identify one or two common, recurring issues in deployments. Perhaps copying application code to servers via SCP always results in broken file permissions, or an [NGINX](https://www.nginx.com) frontend is never properly restarted. For the first iteration of [automated testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/), choose these specific issues to address with testing. This serves two purposes; it limits the scope of work and gives the team an achievable [\"definition of done,\"](https://www.leadingagile.com/2017/02/definition-of-done/) and it provides a highly visible success story by fixing the most problematic existing deployment problems. Once a working pipeline has been deployed and there is organizational buy-in, the testing suite can be expanded.\n\n### 5. 
Automate deployment to lower environments\n\nNew CI/CD implementations should [focus on continuous delivery](/blog/cd-solution-overview/), automatically deploying to a staging environment, and providing a manual decision interface for deploying to production. Continuous deployment is generally a step that should be taken further in the DevOps journey when there is more collective knowledge and technical maturity around automated deployments.\n\n## Get a fast start with CI/CD\n\nA good CI/CD implementation can measurably improve software deployment velocity and is a core pillar of a solid DevOps strategy. However, the first attempt at utilizing CI/CD should eschew heavy, complex deployments whenever possible, instead focusing on a \"batteries-included\" approach that provides teams with a short time-to-value cycle.\n\nOnce CI/CD provides that quick win, engineering teams can build on that momentum and buy-in to scale the solution across the entire organization, improving deployment speed and outcomes throughout.\n",[9,721,873],{"slug":3386,"featured":6,"template":700},"how-to-learn-ci-cd-fast","content:en-us:blog:how-to-learn-ci-cd-fast.yml","How To Learn Ci Cd Fast","en-us/blog/how-to-learn-ci-cd-fast.yml","en-us/blog/how-to-learn-ci-cd-fast",{"_path":3392,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3393,"content":3399,"config":3406,"_id":3408,"_type":14,"title":3409,"_source":16,"_file":3410,"_stem":3411,"_extension":19},"/en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"title":3394,"description":3395,"ogTitle":3394,"ogDescription":3395,"noIndex":6,"ogImage":3396,"ogUrl":3397,"ogSiteName":685,"ogType":686,"canonicalUrls":3397,"schema":3398},"Bamboo Server to GitLab CI migration: Advanced techniques","A real-world look at how a migrated CI/CD infrastructure will work in GitLab 
CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679081/Blog/Hero%20Images/jenkins-migration.jpg","https://about.gitlab.com/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-11\",\n      }",{"title":3400,"description":3395,"authors":3401,"heroImage":3396,"date":3403,"body":3404,"category":718,"tags":3405},"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two",[3402],"Ivan Lychev","2022-07-11","\nIn [part one of our series](/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci/), I showed you how to migrate from Atlassian’s Bamboo Server to GitLab CI/CD. In this blog post we’re going to take a deep dive into how it works from a user’s perspective.\n\n## Get started\n\nYou’ve deployed the demo so it’s time to play with it to understand how it works.\n\nLet's imagine that one of the members of our project is John Doe. He is a software engineer responsible for developing some components (app1, app2, and app3) of the entire product, and he and his team would like to test those components in several combinations in myriad preview environments. So, what does that look like?\n\nFirst of all, let’s make some commits to the app1, app2, and app3 source code and get successful builds upon those commits.\n\nAfter that, we should create releases for those apps to be able to deploy them (as the deployment part of the apps CI config only shows when being triggered by a Git tag, i.e., a GitLab release). A release can be created by launching the last step (`manual-create-release`) in a commit pipeline. 
That would give us a new release with the ugly name containing the date and commit SHA in the patch part (in accord to `semver` scheme):\n\n\n\n![app_gitlab_release](https://about.gitlab.com/images/blogimages/app_gitlab_release.png)\n\nOn the `Tags` tab for the same app you now can see a deployment part of the pipeline has been triggered by the just created GitLab release but no actual environments to deploy are displayed (the `_` item in the `Deploy-nonprod` stage is not an env):\n\n\n![absent_envs](https://about.gitlab.com/images/blogimages/absent_envs.png)\n\n\n## Create an environment\n\nBut before that we have to briefly switch to another team who is responsible for preparing infrastructure IaC templates. Navigate to the `infra/environment-blueprints` project and pretend you are a member of that team doing their job. Namely, imagine you have just created some initial set of IaC files (they are already kindly prepared by me and present in the repository). You’ve tested them and now you feel that they are ready to be used by the other members of the project. You indicate such a readiness of a particular version of the IaC files by giving it a GitTag. Let’s put a tag like `v1.0.0` onto the HEAD version.\n\nYou will see how the tags are going to be used immediately. But first let's make some changes to the IaC files (e.g., add a new resource for some of the apps) and create a second Git tag, let's say `v1.1.0`. So, at this moment we have two versions of IaC templates (or `blueprints`) for our infrastructure - `v1.0.0` and `v1.1.0`.\n\n## Deploy an app into the environment\n\nNow we can return back to John and his team. We assume John is somehow informed that the version of the IaC templates he should use is `v1.0.0`. He wants to create a new preview environment out of the IaC templates of that version and put app1 and app2 into that env. \n\n(Here starts a description of how a user interoperates with the `infrastructure-set` Git repo. 
Notice that though the eventual idea is that it should be a Merge Request workflow – where you first get a Terraform plan within a Merge Request and can apply such a plan by merging the MR – which is widely advocated by GitLab but for the sake of simplicity here the MR workflow is not implemented and instead direct push commits into a branch are made).\n\nJohn wants the env to be named `preview-for-johns-team`. He creates a new branch in the `infrastructure-set` repo with that name and puts two files into it: a `version.txt` containing text `v1.0.0` and `apps.txt` with text `app1 app2` inside (the files format and its content is utterly simplified). \n\nThe `infrastructure-set` pipeline is triggered by the new branch and first generates a Terraform plan using the set of the Terraform files indicated by the tag specified in `version.txt`. John reviews the plan and wants to proceed with creating the environment by starting the `Terraform-apply` stage:\n\n\n![new_env_pipeline](https://about.gitlab.com/images/blogimages/new_env_pipeline.png)\n\n\n(To store the Terraform plan as artifact and Terraform state the embedded features of GitLab are leveraged - [Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/) and [Terraform HTTP back-end by GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).)\n\nNow return to the `app1` project and rerun the pipeline for the app1 release we created previously to make it regenerate a list of environments to deploy. You should see that the `preview-for-johns-team` item has appeared in the list of the environments:\n\n\n![new_env_in_the_deploy_pipeline](https://about.gitlab.com/images/blogimages/new_env_in_the_deploy_pipeline.png)\n\n\nClick the arrow button to deploy. 
Then refer to the `Deployments/Environments` section of the `app1` project to ensure a new env with the app1 release deployed into it is displayed.\n\nWe have successfully created a new environment and deployed one of the apps into it!\n\nNotice that although the above describes how users manually deploy the applications into an environment after it has been created which doesn’t look really convenient, in a real life scenario we most likely would have some additional step in the `infrastructure-set` pipeline that runs after Terraform successfully finishes creating an environment and triggers deployment pipelines for all the applications specified in the `apps.txt`. In that situation, we would need to establish which versions of the applications should be deployed in such an automated manner - for example, those might be the latest versions available for each app or the versions currently deployed to production, etc.\n\n## Update an environment's infrastructure\n\nJohn got notified that a new version of the infrastructure templates is available (you remember that `v1.1.0` tag in the `environment-blueprints` repo?). His team wants to assess how app1 would work within the new conditions. They decide to update an existing env, namely `preview-for-johns-team`, for that purpose. \n\nJohn walks to the `preview-for-johns-team` branch of the `infrastructure-set` repo and changes `version.txt`'s content from `v1.0.0` to `v1.1.0`. The branch pipeline gets triggered and first shows John a Terraform plan with the diff against the current state of the environment. After reviewing and accepting that diff, John proceeds with actually updating the environment by launching the `Terraform-apply` stage. 
That's it!\n\n## Advantages and disadvantages\n\n### Virtues\n\nGiven that this case assumes migrating from some existing CI/CD infrastructure based on Atlassian Bamboo with a lot of users who are familiar with it, the proposed solution leverages the native capabilities of GitLab so that it mostly keeps the concepts and workflows used with Bamboo. This strategy makes the process of migration smoother for the users.\n\nThe solution sticks to the GitOps tenets and empowers a project with all the virtues provided by Git. For example, it's usually easy to track any changes in the infrastructure back to Git repos. (It may not be so easy for the `infrastructure-set` project where we do not have the infrastructure changes captured in Git commits, but in that case a task of finding differences between two states of a particular environment can be accomplished by fetching the two versions of the `environment-blueprints` repo corresponding to those states denoted in the `version.txt` and figuring out the differences by using any apt tool.)\n\nThe solution tends to support user self-service where most of the tasks of changing the infrastructure can be performed only by those familiar with the basics of Git and Terraform. As a result, it offloads the DevOps team from some part of the work and removes dependence on the Ops department which comes in really handy, especially for large-scale projects.\n\n### Shortcomings\n\nBesides the mentioned deficits which stem from the necessity to utterly simplify all the aspects of this demo to make it comprehensible and possible to prepare in a sensible amount of time, this solution possesses some shortcomings that have to be resolved by using external tools to make this solution appropriate for real-life usage.\n\nFor example, there is no way to have a central dashboard with an aggregated view of all the environments with all the apps and their versions deployed into the envs. 
This would require creating some custom SPA web app which would gather information from GitLab via API.\n",[9,721,917],{"slug":3407,"featured":6,"template":700},"how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","content:en-us:blog:how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","How To Migrate Atlassians Bamboo Servers Ci Cd Infrastructure To Gitlab Ci Part Two","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"_path":3413,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3414,"content":3420,"config":3424,"_id":3426,"_type":14,"title":3427,"_source":16,"_file":3428,"_stem":3429,"_extension":19},"/en-us/blog/how-to-security-as-code",{"title":3415,"description":3416,"ogTitle":3415,"ogDescription":3416,"noIndex":6,"ogImage":3417,"ogUrl":3418,"ogSiteName":685,"ogType":686,"canonicalUrls":3418,"schema":3419},"Why implementing security as code is important for DevSecOps","We created a DevSecOps assessment to help your company level up its DevSecOps capabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663618/Blog/Hero%20Images/how-to-implement-security-as-code.jpg","https://about.gitlab.com/blog/how-to-security-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why implementing security as code is important for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":3415,"description":3416,"authors":3421,"heroImage":3417,"date":2329,"body":3422,"category":1040,"tags":3423},[1895],"\n## What is security as code?\n\nSecurity as code is a driving force in the future of [application security](/topics/devsecops/).\nAccording to O’Reilly, 
[security as code is the practice of building security\ninto DevOps tools and workflows](https://www.oreilly.com/library/view/devopssec/9781491971413/ch04.html) by mapping out how changes to code and infrastructure\nare made and finding places to add security checks, tests, and gates without\nintroducing unnecessary costs or delays.\nDevelopers can define infrastructure using a\nprogramming language with infrastructure as code. The same needs to happen to bring security to the speed of DevOps.\n\nAt a basic level, security as code can be achieved by integrating security\npolicies, tests, and scans into the pipeline and code itself. Tests should be\nrun automatically on every code commit, with results made immediately available\nto developers for fixing. By bringing security scans to the code as it’s written,\nteams will save both time and money by streamlining the review process later in\nthe software development lifecycle (SDLC).\n\n## Why is it important?\n\nSecurity as code is key to shifting left and achieving [DevSecOps](/solutions/security-compliance/): It requires\nthat security be defined at the beginning of a project and codified for\nrepeated and consistent use. 
In this way, it gives developers a self-service\noption for ensuring their code is secure.\n\nPredefined security policies boost efficiency, and also allow for checks on\nautomated processes to prevent any mishaps in the deployment process (like\naccidentally taking down the whole infrastructure because a problem wasn’t\nidentified in a staging environment).\n\n## Six security as code capabilities to prioritize\n\nFrancois Raynaud, founder and managing director of [DevSecCon](https://www.devseccon.com/),\nsaid that [security as code is about making security more transparent and\ngetting security practitioners and developers to speak the same language](https://techbeacon.com/devops/devseccon-security-code-secure-devops-techniques-track).\nIn other words – security teams need to understand how developers work, and use that\ninsight to help developers build the necessary security controls into the SDLC.\nDevelopers can reciprocate by staying open-minded as they adopt new tools and\npractices to boost security during the development process. Here are six best\npractices and capabilities to build into your pipeline:\n\n1. Automate security scans and tests (such as [static analysis](https://docs.gitlab.com/ee/user/application_security/sast/),\n[dynamic analysis](https://docs.gitlab.com/ee/user/application_security/dast/),\nand penetration testing) within your pipeline so that they can be reused across\nall projects and environments.\n1. Build a continuous feedback loop by presenting results to developers, allowing\nthem to remediate issues while coding and learn best practices during the coding\nprocess.\n1. Evaluate and monitor automated security policies by building checks into the\nprocess. Verify that sensitive data and secrets are not inadvertently shared or published.\n1. Automate complex or time-consuming manual tests via custom scripts, with\nhuman sign-off on results if necessary. 
Validate the accuracy and efficiency of\ntest scripts so that they can be replicated across different projects.\n1. Test new code within a staging environment to allow for thorough security and\nlow-impact failure, and test on every code commit.\n1. Scheduled or continuous monitoring should automatically create logs (or red\nflags) within a review dashboard (such as GitLab’s [Security Dashboard feature](https://docs.gitlab.com/ee/user/application_security/security_dashboard/index.html)).\n\n## Security as code is a best practice for a bigger goal\n\nSecurity as code gives pragmatic meaning to the concept of DevSecOps, but it\nshould not be your end goal. Ultimately, security as code is a means to get more people on board with integrating security throughout your\nSDLC. The idea will feel familiar to developers who\nhave practiced infrastructure as code, and it provides an opportunity for\nsecurity to step into the fray both to better understand software development\nand to help design the policies that will be codified in the process.\n\nAs your team works its way toward becoming a well-oiled DevSecOps machine,\nsecurity as code will inevitably present itself as a smart solution within a complex endeavor.\n\n## GitLab’s DevSecOps methodology assessment\n\nThere’s a lot to cover when standing up a DevSecOps process – so to help you\nmaster the key elements, we created a DevSecOps methodology assessment. Score\nyourself on 20 capabilities, and then use those scores to understand your DevSecOps\nmaturity level, and determine what actions your team can take to bring your DevSecOps to\nthe next level. 
[Download the assessment here.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)\n\nCover image by [Tim Evans](https://unsplash.com/@tjevans) on [Unsplash](https://unsplash.com/photos/Uf-c4u1usFQ)\n{: .note}\n",[721,697,9,875],{"slug":3425,"featured":6,"template":700},"how-to-security-as-code","content:en-us:blog:how-to-security-as-code.yml","How To Security As Code","en-us/blog/how-to-security-as-code.yml","en-us/blog/how-to-security-as-code",{"_path":3431,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3432,"content":3438,"config":3444,"_id":3446,"_type":14,"title":3447,"_source":16,"_file":3448,"_stem":3449,"_extension":19},"/en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"title":3433,"description":3434,"ogTitle":3433,"ogDescription":3434,"noIndex":6,"ogImage":3435,"ogUrl":3436,"ogSiteName":685,"ogType":686,"canonicalUrls":3436,"schema":3437},"How to translate Bamboo agent capabilities to GitLab Runner tags  ","This tutorial demonstrates how to use tags to organize GitLab Runners when building complex CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663019/Blog/Hero%20Images/AdobeStock_519147119.jpg","https://about.gitlab.com/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to translate Bamboo agent capabilities to GitLab Runner tags  \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-02-22\",\n      }",{"title":3433,"description":3434,"authors":3439,"heroImage":3435,"date":3441,"body":3442,"category":718,"tags":3443},[3440],"Abubakar Siddiq Ango","2024-02-22","CI pipelines often start simple – a single job building a binary and pushing\nit to an artifact repository or to some production environment.\nEver-changing software requirements introduce 
more complexities, such as\nadding more jobs to perform certain checks and reviewing the output before\nthe final build job is executed.  \n\n\nThese complexities increase exponentially when builds are expected to target\nvarying systems with different system architectures or resource needs. This\nis evident in projects like operating systems, mobile apps, or software\ndistributions that support multiple deployment platforms. To account for the\nvarying needs of builds in these types of environments, having multiple\nrunners that match needed requirements is key, and that's where [GitLab\nRunner](https://docs.gitlab.com/runner/) tags come in. If you are coming\nfrom Atlassian's Bamboo, they are called \"agent capabilities.\"\n\n\nRunner tags allow organizing runners by a tag that signifies a specific use\ncase they support; these tags are then used to make sure CI jobs run on a\nrunner that meets their requirements. A job can require GPU resources that\nare only available on a handful of runners; tagging the job to the tags of\nthe runner allows it to be scheduled on the runner with GPUs.\n\n\nAgent capabilities on Bamboo are used to achieve the same functionality by\nspecifying binaries or custom identifiers that must be matched or available\nfor a job to run on a Bamboo agent. In this blog post, we will be looking at\nhow to translate Bamboo agent capabilities to GitLab Runner tags. 
\n\n\nBamboo has varying agent capabilities:\n\n- Executable capability specifies executables that are available on an\nagent.\n\n- JDK capability specifies that the Java Development Kit is installed and\navailable for builds.\n\n- Version Control capability lets Bamboo know the version control systems\nset up on an agent and where the client application is located.\n\n- Docker capability is used to define the agents where Docker is installed\nfor Docker tasks\n\n- Custom capability uses key/value identifiers to identify a unique\nfunctionality an agent provides.\n\n\nGitLab makes the process easier by using tags to identify Runners, some of\nwhich can be assigned multiple tags to denote the varying functionalities\nthey can provide to jobs. Let's look at how you can use Runner tags in\nGitLab.\n\n\n## Adding tags to GitLab Runner\n\n\nWhen [registering a\nrunner](https://docs.gitlab.com/runner/register/index.html) after\ninstallation, one of the steps requires providing a list of comma-separated\ntags that can be used. If none are provided at this stage, you can always\nedit the `/etc/gitlab-runner/config.toml` file and add any missing tags.\n\n\nYou can also manage the tags of a runner in GitLab by accessing the runner's\nedit page and updating the `Tags` field. You have the option for the runner\nto be exclusive to jobs that are tagged appropriately, or when there are no\ntagged jobs to run, it should run untagged jobs, too. 
Checking `Run untagged\njobs` enables this behavior.\n\n\n## Using tags in .gitlab-ci.yml file\n\n\nTo run a job on a specific runner, add the relevant tags to the job's\nconfiguration, as shown below:\n\n\n```yaml\n\nbuild_ios:\n  image: macos-13-xcode-14\n  stage: build\n  script:\n    - bundle check --path vendor/bundle || bundle install --path vendor/bundle --jobs $(nproc)\n    - bundle exec fastlane build\n  tags: \n    - saas-macos-medium-m1\n```\n\nIn the example above, the job builds an iOS application only on runners\noperating on a macOS device with an M1 chip and tagged\n`saas-macos-medium-m1`.\n\n\n## Using multiple tags\n\n\nA job can specify multiple tags to target a diverse range of runners,\nespecially in organizations that run several fleets of runners as part of\ntheir software development lifecycle. A job will only run if a runner is\nfound that has all the tags the job has been tagged with. For example, if a\njob has `[linux, android, fastlane]` tags, a runner with `[android,\nfastlane]` or `[linux, android]` will not execute the job because the full\nset of tags does not match the runner.\n\n\n## Dynamic jobs with tags and variables\n\n\nYou can use variables to determine the values of tags and thus dynamically\ninfluence which runners pick up the jobs. For example:\n\n\n```\n\nvariables:\n  KUBERNETES_RUNNER: kubernetes\n\njob:\n  tags:\n    - docker\n    - $KUBERNETES_RUNNER\n  script:\n    - echo \"Hello runner selector feature\"\n\n``` \n\n\nIn this example, only runners tagged with both `docker` and `kubernetes` will execute the job.\nYou can take this further in more complex pipelines with [`parallel:\nmatrix`](https://docs.gitlab.com/ee/ci/yaml/index.html#parallelmatrix). 
Here\nis an example:\n\n\n```\n\ndeploystacks:\n  stage: deploy\n  parallel:\n    matrix:\n      - PROVIDER: aws\n        STACK: [monitoring, app1]\n      - PROVIDER: gcp\n        STACK: [data]\n  tags:\n    - ${PROVIDER}-${STACK}\n  environment: $PROVIDER/$STACK\n\n```\n\n\nThis example ends up with three parallel jobs with three different tags for\neach: `aws-monitoring`, `aws-app1` and `gcp-data`, thus targeting possibly\nthree different runners.\n\n\nUsing tags in your GitLab CI configuration gives you the flexibility to\ndetermine where and how your applications are built, to use resources more\nefficiently as scarce resources can be limited to certain runners, and to\ndetermine how jobs are allocated to those runners.\n\n\n> Learn more about [how to make the move from Atlassian to\nGitLab](https://about.gitlab.com/move-to-gitlab-from-atlassian/).\n",[9,785,917],{"slug":3445,"featured":91,"template":700},"how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","content:en-us:blog:how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","How To Translate Bamboo Agent Capabilities To Gitlab Runner Tags","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"_path":3451,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3452,"content":3458,"config":3463,"_id":3465,"_type":14,"title":3466,"_source":16,"_file":3467,"_stem":3468,"_extension":19},"/en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"title":3453,"description":3454,"ogTitle":3453,"ogDescription":3454,"noIndex":6,"ogImage":3455,"ogUrl":3456,"ogSiteName":685,"ogType":686,"canonicalUrls":3456,"schema":3457},"How to use OCI images as the source of truth for continuous delivery","Discover the benefits of using Open Container Initiative images as part of GitOps workflows and the many features GitLab offers to simplify deployments to 
Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097601/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20Use%20this%20page%20as%20a%20reference%20for%20thumbnail%20sizes_76Tn5jFmEHY5LFj8RdDjNY_1750097600692.png","https://about.gitlab.com/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use OCI images as the source of truth for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":3453,"description":3454,"authors":3459,"heroImage":3455,"date":3460,"body":3461,"category":783,"tags":3462},[2426],"2025-02-19","Is [GitOps](https://about.gitlab.com/topics/gitops/) still GitOps if you are\nnot using a git repository as your deployment artifact? While git remains\ncentral to GitOps workflows, storing infrastructure definitions as Open\nContainer Initiative (OCI) artifacts in container registries has seen a rise\nin adoption as the source for GitOps deployments. 
In this article, we will\ndive deeper into the ideas behind this trend and how GitLab features support\nthis enhancement to GitOps workflows.\n\n\n## What is GitOps?\n\n\nThe [OpenGitOps](https://opengitops.dev/) project has defined [four\nprinciples](https://opengitops.dev/#principles) for the practice of GitOps:\n\n- A [system managed by\nGitOps](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#software-system)\nmust have its [desired state expressed\ndeclaratively](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#declarative-description).\n\n- Desired state is stored in a way that enforces immutability and\nversioning, and retains a complete version history.\n\n- Software agents automatically pull the desired state declarations from the\nsource.\n\n- Software agents\n[continuously](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#continuous)\nobserve actual system state and [attempt to apply the desired\nstate](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#reconciliation).\n\n\nAn example of GitOps is storing the Kubernetes manifests for a microservice\nin a GitLab project. Those Kubernetes resources are then continuously\nreconciled by a\n[controller](https://kubernetes.io/docs/concepts/architecture/controller/)\nrunning on the Kubernetes cluster where the microservice is deployed to.\nThis allows engineers to manage infrastructure using the same workflows as\nworking with regular code, such as opening merge requests to make and review\nchanges and versioning changes. GitOps also has operational benefits such as\n[preventing configuration\ndrift](https://about.gitlab.com/topics/gitops/#cicd) and helps engineers\naudit what changes led to certain outcomes with deployments.\n\n\n## Benefits and limitations of git in GitOps workflows\n\n\nWhile git is an essential piece of GitOps workflows, git repositories were\nnot designed to be deployed by GitOps controllers. 
Git does provide the\nability for engineers to collaborate on infrastructure changes and audit\nthese changes later on, but controllers do not need to download an entire\ngit repository for a successful deployment. GitOps controllers simply need\nthe infrastructure defined for a particular environment.\n\n\nAdditionally, an important piece of the deployment process is to [sign and\nverify\ndeployments](https://docs.sigstore.dev/about/overview/#why-cryptographic-signing)\nto assure deployment changes to an environment are coming from a trusted\nsource. While git commits can be signed and verified by GitOps controllers,\ncommits may also capture other details not related to the deployment itself\n(e.g., documentation changes, updates to other environments, and git\nrepository restructuring) or not enough of the deployment picture as a\ndeployment may consist of multiple commits. This again feels like a case\nthis git feature wasn’t designed for.\n\n\nAnother challenging aspect of git in GitOps workflows is that it can\nsometimes lead to more automation than expected. Soon after merging a change\nto the watched branch, it will be deployed. There are no controls in the\nprocess outside of git. How can you make sure that nothing gets deployed on\na Friday late afternoon? What if teams responsible for deployment do not\nhave permissions to merge changes in certain GitLab projects? Using OCI\nimages adds a pipeline into the process, including all the delivery control\nfeatures, like [approvals or deploy\nfreezes](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n\n## OCI images\n\n\nThe [Open Container Initiative](https://opencontainers.org/) has helped to\ndefine standards around container formats. While most engineers are familiar\nwith building Dockerfiles into container images, many may not be as familiar\nwith storing Kubernetes manifests in a container registry. 
Because [GitLab’s\nContainer\nRegistry](https://docs.gitlab.com/ee/user/packages/container_registry/) is\nOCI compliant, it allows for users to push Kubernetes manifests for a\nparticular environment to a container registry. GitOps controllers, such as\n[Flux\nCD](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/),\ncan use the manifests stored in this OCI artifact instead of needing to\nclone an entire git repository.\n\n\nOften in GitOps workflows, a git repository can include the infrastructure\ndefinitions for all environments that a microservice will be deployed to. By\npackaging the Kubernetes manifests for only a specific environment, Flux CD\ncan download the minimum files needed to carry out a deployment to a\nspecific environment.\n\n\n### Security benefits of using OCI artifacts\n\n\nAs mentioned previously, signing and verifying the artifacts to be deployed\nto an environment adds an additional layer of security for software\nprojects. After Kubernetes manifests are pushed to a container registry, a\ntool like [Sigstore\nCosign](https://docs.sigstore.dev/quickstart/quickstart-cosign/) can be used\nto sign the OCI image with a private key that can be securely stored in a\nGitLab project as a [CI/CD\nvariable](https://docs.gitlab.com/ee/ci/variables/). Flux CD can then use a\npublic key stored on a Kubernetes cluster to verify that a deployment is\ncoming from a trusted source.\n\n\n## Using GitLab to push and sign OCI images\n\n\nGitLab offers many features that help simplify the process of packaging,\nsigning, and deploying OCI images. A common way to structure GitLab projects\nwith GitOps workflows is to have separate GitLab projects for microservices’\ncode and a single infrastructure repository for all microservices. 
If an\napplication is composed of `n` microservices, this would require having `n\n+1` GitLab projects for an application.\n\n\nThe artifact produced by a code project is usually a container image that\nwill be used to package the application. The infrastructure or delivery\nproject will contain the Kubernetes manifests defining all the resources\nrequired to scale and serve traffic to each microservice. The artifact\nproduced by this project is usually an OCI image used to deploy the\napplication and other manifests to Kubernetes.\n\n\nIn this setup, separation of environments is handled by defining Kubernetes\nmanifests in separate folders. These folders represent environments (e.g.,\ndevelopment, staging, and production) that will host the application. When\nchanges are made to the code project and a new container image is pushed,\nall that needs to be done to deploy these changes via GitLab’s integration\nwith Flux CD is to edit the manifests under the environment folder to\ninclude the new image reference and open a merge request. Once that merge\nrequest is reviewed, approved, and merged, the delivery project’s CI/CD job\nwill push a new OCI image that Flux CD will pick up and deploy to the new\nenvironment.\n\n\n![OCI images - flow\nchart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097611/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097611046.png)\n\n\nSigning an OCI image is as simple as including Cosign in your project’s\nCI/CD job. You can simply generate a new public and private key with Cosign\nby running the commands below locally. 
Just make sure to log in to your\nGitLab instance with the [glab\nCLI](https://gitlab.com/gitlab-org/cli/#installation) and replace the\n[`PROJECT_ID`] for the Cosign command with your [delivery project’s\nID](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-a-project-by-using-the-project-id).\n\n\n```\n\nglab auth login\n\ncosign generate-key-pair gitlab://[PROJECT_ID]\n\n```\n\n\nOnce the cosign command runs successfully, you can see the Cosign keys added\nto your project under the CI/CD variables section under the key names\n`COSIGN_PUBLIC_KEY` and `COSIGN_PRIVATE_KEY`.\n\n\n### Example CI/CD job\n\n\nA GitLab CI/CD job for pushing an OCI image will look something like the\nfollowing:\n\n\n```yaml\n\nfrontend-deploy:\n  rules:\n  - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    changes:\n      paths:\n      - manifests/dev/frontend-dev.yaml\n  trigger:\n    include:\n      - component: gitlab.com/components/fluxcd/oci-artifact@0.3.1\n        inputs:\n          version: 0.3.1\n          kubernetes_agent_reference: gitlab-da/projects/tanuki-bank/flux-config:dev\n          registry_image_url: \"oci://$CI_REGISTRY_IMAGE/frontend\"\n          image_tag: dev\n          manifest_path: ./manifests/dev/frontend-dev.yaml\n          flux_oci_repo_name: frontend\n          flux_oci_namespace_name: frontend-dev\n          signing_private_key: \"$COSIGN_PRIVATE_KEY\"\n```\n\n\nThe [GitLab CI/CD\nCatalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\noffers a GitLab-maintained [CI/CD component for working with OCI artifacts\nand Flux CD](https://gitlab.com/explore/catalog/components/fluxcd). 
This\ncomponent allows development teams to push Kubernetes manifests as OCI\nimages to GitLab’s Container Registry or an external container registry,\nsign the OCI image using Cosign, and immediately reconcile the newly pushed\nimage via Flux CD.\n\n\nIn the example above, the Flux CD `component` is included in a\n`.gitlab-ci.yml` file of a GitLab project. Using the component’s `inputs`,\nusers can define what registry to push the image to (i.e.,\n`registry_image_url` and `image_tag`), the file path to Kubernetes manifests\nthat will be pushed (i.e., `manifest_path`), the Cosign private key used to\nsign images (i.e., `signing_private_key`), and the Kubernetes namespace and\nFlux CD\n[OCIRepository](https://fluxcd.io/flux/components/source/ocirepositories/)\nname needed to sync updates to an environment (i.e.,\n`flux_oci_namespace_name` and `flux_oci_repo_name`).\n\n\nThe `kubernetes_agent_reference` allows GitLab CI/CD jobs to inherit the\n`kubeconfig` needed to access a Kubernetes cluster without needing to store\na `kubeconfig` CI/CD variable in each GitLab project. By setting up the\n[GitLab agent for\nKubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), you can\nconfigure all GitLab projects’ CI/CD jobs in a [GitLab\ngroup](https://docs.gitlab.com/ee/user/group/) to inherit permissions to\ndeploy to the Kubernetes cluster.\n\n\nThe agent for Kubernetes context is typically configured wherever you\nconfigure the GitLab Agent for Kubernetes in your GitLab group. It is\ntypically recommended that this be done in the project where Flux CD is\nmanaged. More information on configuring the agent for CI/CD access can be\nfound in our [CI/CD workflow\ndocumentation](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html).\n\n\nThe variables `$COSIGN_PRIVATE_KEY`, `$FLUX_OCI_REPO_NAME`, and\n`$FRONTEND_DEV_NAMESPACE` are values stored as CI/CD variables to easily\naccess and mask these sensitive pieces of data in CI/CD logs. 
The\n`$CI_REGISTRY_IMAGE` is a variable that GitLab jobs have available by\ndefault that specifies the GitLab project’s container registry.\n\n\n### Deploy OCI images\n\n\nUsing [Flux CD with your GitLab\nprojects](https://docs.gitlab.com/ee/user/clusters/agent/gitops/flux_tutorial.html),\nyou can automate deployments and signing verification for your\nmicroservice’s environments. Once Flux CD is configured to sync from a\nGitLab project, you could add the following Kubernetes [custom resource\ndefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)\nto your project to sync your pushed OCI image.\n\n\n```yaml\n\napiVersion: v1\n\nkind: Namespace\n\nmetadata:\n  name: frontend-dev\n  labels:\n    name: frontend-dev\n---\n\napiVersion: bitnami.com/v1alpha1\n\nkind: SealedSecret\n\nmetadata:\n  name: cosign-public-key\n  namespace: frontend-dev\nspec:\n  encryptedData:\n    cosign.pub: AgAKgLf4VbVzJOmr6++k81LlFayx88AELaUQFNOaXmBF4G+fBfBYeABl0skNvMAa1UrPVNSfMIHgFoYHoO96g576a+epk6V6glOI+++XvYbfsygof3GGxe0nL5Qh2b3ge0fNpyd0kTPSjTj0YUhRhKtMGMRSRw1jrwhNcGxCHK+Byibs52v8Np49KsIkeZKbzLdgYABkrv+k0j7hQM+jR180NpG+2UiRvaXpPuogxkbj61FEqWGrJHk8IVyfl3eh+YhoXxOHGDqko6SUC+bUZPDBlU6yKegO0/8Zq3hwulrSEsEjzRZNK+RFVMOLWWuC6h+WGpYhAMcsZPwjjJ/y29KLNa/YeqkN/cdk488QyEFc6ehCxzhH67HxIn2PDa+KkEOTv2TuycGF+Q00jKIizXF+IwLx/oRb3pTCF0AoAY8D8N3Ey+KfkOjsBON7gGID8GbQiJqX2IgIZxFMk0JRzxbRKOEqn+guLd5Shj7CD1a1Mkk0DxBdbqrGv2XNYUaFPI7xd3rZXUJZlnv+fsmwswsiGWRuXwim45HScWzQnfgLAe7tv3spVEGeaO5apl6d89uN21PBQnfE/zyugB//7ZW9tSp6+CSMyc5HynxI8diafqiwKPgvzLmVWRnkvxJijoXicRr3sCo5RudZPSlnjfd7CKdhwEVvLl7dRR4e/XBMdxCzk1p52Pl+3/kJR+LJii5+iwOpYrpVltSZdzc/3qRd19yMpc9PWpXYi7HxTb24EOQ25i21eDJY1ceplDN6bRtop2quzkjlwVeE2i4cEsX/YG8QBtQbop/3fjiAjKaED3QH3Ul0PECS9ARTScSkcOL3I00Xpp8DyD+xH0/i9wCBRDmH3yKX18C8VrMq02ALSnlP7WCVVjCPzubqKx2LPZRxK9EG0fylwv/vWQzTUUwfbPQZsd4c75bSTsTvxqp/UcFaXA==\n  template:\n    metadata:\n      name: cosign-public-key\n      namespace: frontend-dev\n---\n\napiVersion: 
source.toolkit.fluxcd.io/v1beta2\n\nkind: OCIRepository\n\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    url: oci://registry.gitlab.com/gitlab-da/projects/tanuki-bank/tanuki-bank-delivery/frontend\n    ref:\n        tag: dev\n    verify:\n      provider: cosign\n      secretRef:\n        name: cosign-public-key\n---\n\napiVersion: kustomize.toolkit.fluxcd.io/v1\n\nkind: Kustomization\n\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    targetNamespace: frontend-dev\n    path: \".\"\n    sourceRef:\n        kind: OCIRepository\n        name: frontend\n    prune: true\n```\n\n\nThe\n[`Kustomization`](https://fluxcd.io/flux/components/kustomize/kustomizations/)\nresource allows for further customization of Kubernetes manifests and also\nspecifies which namespace to deploy resources to. The `OCIRepository`\nresource for Flux CD allows users to specify the OCI image repository\nreference and tag to regularly sync from. Additionally, you will notice the\n`verify.provider` and `verify.secretRef` properties. These fields allow you\nto verify that the OCI image deployed to the cluster was signed by the\ncorresponding Cosign private key used in the earlier CI/CD job.\n\n\nThe public key needs to be stored in a [Kubernetes\nsecret](https://kubernetes.io/docs/concepts/configuration/secret/) that will\nneed to be present in the same namespace as the `OCIRepository` resource. To\nhave this secret managed by Flux CD and not store the secret in plain text,\nyou can consider using\n[SealedSecrets](https://fluxcd.io/flux/guides/sealed-secrets/) to encrypt\nthe value and have it be decrypted cluster side by a controller.\n\n\nFor a simpler approach not requiring SealedSecrets, you can [deploy the\nsecret via a GitLab\nCI/CD](https://docs.gitlab.com/ee/user/clusters/agent/getting_started_deployments.html)\njob using the [`kubectl\nCLI`](https://kubernetes.io/docs/reference/kubectl/). 
In the non-sealed\nsecret approach, you would simply remove the SealedSecret included above and\nrun the job to deploy the public key secret before running the job to push\nthe new OCI image. This will make sure the secret is stored securely in\nGitLab and make sure the secret can be accessed on the cluster by the\nOCIRepository. While this approach is a bit simpler, just note this is not a\nsuitable approach for managing secrets in production.\n\n\n## The benefits of OCI, GitLab, and GitOps\n\n\nOCI artifacts allow for GitOps teams to take deployments even further with\nadded security benefits and allowing for deployments to be minimal. Users\nstill gain all the benefits offered by git as far as having a source of\ntruth for infrastructure and collaborating on projects. OCI images add a\npackaging approach that improves the deployment aspect of GitOps.\n\n\nGitLab continues to learn from our customers and the cloud native community\non building experiences that help simplify GitOps workflows. To get started\nusing some of the features mentioned in this blog, you can sign up for a\n[free trial of GitLab\nUltimate](https://about.gitlab.com/free-trial/). 
We would also love to hear\nfrom users about their experiences with these tools, and you can provide\nfeedback in the [community\nforum](https://forum.gitlab.com/t/oci-images-as-source-of-truth-for-gitops-with-gitlab/120965).\n",[9,827,1228,549,1105,917],{"slug":3464,"featured":6,"template":700},"how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","content:en-us:blog:how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","How To Use Oci Images As The Source Of Truth For Continuous Delivery","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"_path":3470,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3471,"content":3477,"config":3483,"_id":3485,"_type":14,"title":3486,"_source":16,"_file":3487,"_stem":3488,"_extension":19},"/en-us/blog/how-visualization-improves-the-gitlab-merge-train-experience",{"title":3472,"description":3473,"ogTitle":3472,"ogDescription":3473,"noIndex":6,"ogImage":3474,"ogUrl":3475,"ogSiteName":685,"ogType":686,"canonicalUrls":3475,"schema":3476},"How visualization improves the GitLab merge train experience","Merge train visualization lets users closely track merge train activities and take actions with a better understanding of the impact on other MRs in the queue.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098825/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2824%29_1KuzZLH1aSgBZsGVXGPIjf_1750098824773.png","https://about.gitlab.com/blog/how-visualization-improves-the-gitlab-merge-train-experience","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How visualization improves the GitLab merge train experience\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Payton Burdette\"},{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        
\"datePublished\": \"2024-07-25\",\n      }",{"title":3472,"description":3473,"authors":3478,"heroImage":3474,"date":3480,"body":3481,"category":693,"tags":3482},[3479,1915],"Payton Burdette","2024-07-25","GitLab's merge train feature on the DevSecOps platform has worked wonders for organizations looking for a solution to automatically manage conflicts among different merge requests that are merged in close proximity to each other. [Merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) support all merge methods and ensure all MRs work together, which saves time and reduces the stress of breaking the default branch, especially for teams dealing with long build times or a small fleet of runners. Merge trains also alleviate some of the burden on developers who have to track the progress of other MRs before pushing the \"Merge\" button.\n\nDespite the benefits of a merge train, without having a UI to visualize its inner workings, users find it hard to trust the process. Sometimes it is difficult to distinguish failures caused by user actions from those due to flaky runs. \n\nMoreover, the lack of visibility into what else is queued before or after a particular MR has made users less confident when taking actions such as merging immediately or removing MRs from the merge train.\n\nTo address this gap in user experience, we are introducing merge train visualization in GitLab (Premium and Ultimate tiers) for better visibility into and tracking of the merge train queue.\n\n## Merge train visualization\n\nBased on findings from user research and feedback, we have defined a set of requirements for the first iteration of this feature. 
Here’s what you can expect.\n\n### View merge trains\n\nCurrently, when a merge request is added to the train, a link to the merge train details page is surfaced on the pipeline widget.\n\n![Merge train running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098833/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098833102.png)\n\n### View list of MRs queued in a train\n\nWith the new merge train visualization, users can see a list of all MRs queued in the train. This transparency helps developers understand the order of merges and anticipate potential conflicts or issues. Knowing what is queued provides clarity and allows for better planning and coordination among team members.\n\n![List of MRs queued in the train](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098833/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098833102.png)\n\n### View list of MRs already merged by the train\n\nIn addition to seeing what is queued, users can also view a list of MRs that have already been successfully merged by the train. This historical context is valuable for tracking progress and understanding the sequence of changes that have been integrated into the default branch.\n\n![List of merged MRs in the train](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098833/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098833104.png)\n\n### Remove a merge request from the train straight from visualization\nThe new visualization also enables quick actions. The first action implemented is removing an MR from the merge train. 
This streamlined workflow reduces the time and effort required to manage merge trains, making it easier to respond to issues as they arise and maintain a smooth CI/CD pipeline.\n\n![Remove a merge request screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098833/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098833107.png)\n\n## The benefits of merge train visualization\n\nMerge train visualization has the following benefits: \n\n1. Enhanced transparency and trust\n- By visualizing the merge train, GitLab provides users with the transparency they need to trust the system. Understanding what is happening within the merge train reduces uncertainty and builds confidence in the automated process.\n2. Improved efficiency and collaboration\n- Teams can work more efficiently by having a clear view of the merge train. Developers can better coordinate their efforts, avoid redundant work, and quickly address issues. This collaborative approach ensures smoother and faster integration of changes.\n3. Reduced risk of failures\n- With visibility into the merge train, users can identify and address potential conflicts or failures early. This proactive approach minimizes the risk of breaking the default branch, leading to more stable and reliable builds.\n\n## What’s next?\n\nAs we learn more about how users interact with the merge train visualization, we intend to [add more capabilities](https://gitlab.com/gitlab-org/gitlab/-/issues/277391/designs/mr-visualization-as-a-list.png) to the list view. Early ideas include displaying estimated time to merge, ability to re-order, and displaying removed merge requests from the train. If you have ideas that you want to share, don’t forget to leave a comment on [our feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/464774).\n\nWe believe that the merge train visualization will significantly enhance the user experience for developers using GitLab. 
By providing a clear and actionable view of the merge train, we aim to make the merge process more transparent, efficient, and reliable.\n\n> Sign up for a [free trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) to test-drive merge train visualization.\n",[9,695,693],{"slug":3484,"featured":91,"template":700},"how-visualization-improves-the-gitlab-merge-train-experience","content:en-us:blog:how-visualization-improves-the-gitlab-merge-train-experience.yml","How Visualization Improves The Gitlab Merge Train Experience","en-us/blog/how-visualization-improves-the-gitlab-merge-train-experience.yml","en-us/blog/how-visualization-improves-the-gitlab-merge-train-experience",{"_path":3490,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3491,"content":3496,"config":3502,"_id":3504,"_type":14,"title":3505,"_source":16,"_file":3506,"_stem":3507,"_extension":19},"/en-us/blog/how-we-increased-our-release-velocity-with-gitlab",{"title":3492,"description":3493,"ogTitle":3492,"ogDescription":3493,"noIndex":6,"ogImage":3122,"ogUrl":3494,"ogSiteName":685,"ogType":686,"canonicalUrls":3494,"schema":3495},"How we increased our release velocity with GitLab","Learn Evolphin's challenges, reasons for choosing the DevSecOps platform, and our end state following the transition.","https://about.gitlab.com/blog/how-we-increased-our-release-velocity-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we increased our release velocity with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rahul Bhargava, CTO, Evolphin\"}],\n        \"datePublished\": \"2022-12-05\",\n      }",{"title":3492,"description":3493,"authors":3497,"heroImage":3122,"date":3499,"body":3500,"category":1288,"tags":3501},[3498],"Rahul Bhargava, CTO, Evolphin","2022-12-05","\nAt Evolphin, we have a remotely-distributed 
software development team creating the [Evolphin Zoom Media Asset Management system](https://evolphin.com/media-asset-management/). Our core R&D team is split across multiple geographies, with staff in India, the U.S., and the Philippines, as well as freelancers around the world. We needed to find new ways to address our team challenges and increase the pace of delivery of our product updates to Evolphin Zoom suite, in response to our customer needs. This blog outlines our challenges, reasons for choosing GitLab, and our end state, including a 30% to 40% increase in our release velocity, following the transition.\n\n## What is a media asset management system? \n\nWith the increased demand for video content for entertainment, marketing, customer engagement, etc., media asset management systems have become increasingly popular for collaborating, organizing, and archiving rich media assets. \n\nThe assorted camera card types, encoding formats, and publishing demands of social media and other video-on-demand services create a heterogenous content creation and publishing industry desperate for order. Media asset management systems are a timely answer to the problem of managing and unifying the diverse media assets characteristic of the industry.\n\nAt Evolphin, we’re at the heart of this solution with the Evolphin Zoom Media Asset Management system, an enterprise offering that runs on approximately 4.7 million lines of source code. To address the root of the problem, media asset management products like Evolphin Zoom must rapidly evolve - add new or enhance existing features - to meet customers’ ever-changing needs.\n\n## The problem: Slow updates\n\nBefore adopting GitLab, we used Subversion (Tortoise as the UI) as our source code repository and software version management system. We chose Subversion at the time because we needed an on-premises solution, as cloud-based branch management was not widely adopted in 2012 when we started working on the Evolphin Zoom. 
\n\nOur branching and merging workflow with Subversion was tedious, slow, and complicated. It took us around four to five weeks to manually manage and merge software changes across branches within this system. This meant that releasing each product update took five weeks at the very minimum. \n\n## Our requirement: Better collaboration for branch management\n\nWe needed a more agile solution to remain responsive to our customers' needs in this fast-paced software development environment. \n\nAs we transitioned to a remotely distributed workforce model, we identified a need for a software version management system designed with decentralized teams in mind. We wanted to be able to create a user story for a new feature in one week, test it with beta users the next week, and release it in production the week after. \n\nFor this level of agility, an affordable, open-source software repository with a platform like GitLab seemed the perfect solution.\n\n## Why GitLab?\n\nWith all the necessary tools for software review management and collaboration, GitLab appeared to fit our needs. \n\nThe ability to remotely check changes into a feature branch meant that users could check in a version and trigger a merge request for approval before merging changes from the remote user’s branch into the main software development branch. \n\nAll these features were available under GitLab’s free community version, with a user-friendly, visually-appealing UI that eased our transition from on-premises to cloud-based development. 
\n\n## End-state with GitLab\n\nHere is our workflow in numbers:\n\n| Total GitLab projects managed | 44  \t   \t\t\t\t\t\t|\t\n| Total branches \t\t\t\t| 514\t   \t\t\t\t\t\t|\n| Total repo size\t\t\t\t| 10.03 GB \t\t\t\t\t\t|\n| Total users\t\t\t\t\t| 33\t   \t\t\t\t\t\t|\n| Total groups\t\t\t\t\t| 15 \t   \t\t\t\t\t\t|\n| MFA-enabled\t\t\t\t\t| Yes \t   \t\t\t\t\t\t|\n| Number of files\t\t\t\t| 26125 text files  \t\t\t|\n| Number of unique files\t\t| 25090 unique files\t\t\t|\n| Code\t\t\t\t\t\t\t| 4,738,187 lines of code \t   \t|\n| GitLab product plan\t\t\t| Community plan on the cloud\t|\n\n\nOur new workflow depends on GitLab as the single source of truth for all our source code, binary dependencies, and DevOps projects. We currently have GitLab integrations with our CI/CD pipeline using Jenkins and our issue-tracking system - JetBrains YouTrack. Besides source code management (SCM), we use code review features frequently. In addition,  all our internal docs, requirements gathering, tips and tricks between developers, DevOps, and QA are shared in Wiki. All our collaboration happens over GitLab Wikis and SCM. Our developers and DevOps engineers use the same GitLab repo to make it easy to manage source code and build artifacts for deployment.\n\nSince the pandemic started, we have executed several Amazon Web Services (AWS) cloud-based deployments. Some of our DevOps projects in GitLab are integrated with the AWS cloud formation stacks/scripts to enable consistent tenant deployments for our cloud customers.\n\n## Impact on Evolphin’s customers\n\nThe biggest transformation we noticed from adopting GitLab was a more seamless, collaborative, and efficient workflow for our R&D teams. \n\nFor example, a bug fix could be implemented in branches by developers working in parallel, which could then be merged into a pre-production branch for QA. Following the QA review, changes can be pushed to the main production branch for release. 
\n\nBeing open source, we can easily integrate with CI/CD platforms and the new workflow significantly improved our productivity regarding feature releases, especially taking into consideration our high volume of product updates. With GitLab, we can execute feature releases two to three weeks faster than previously. This includes twice-monthly feature changes, and monthly security updates, with annual major product changes. Overall, our release velocity increased by 30% to 40% just by switching from Subversion to a GitLab-based workflow.\n\n_Rahul Bhargava is the CTO and founder of Evolphin Software._\n",[828,873,9],{"slug":3503,"featured":6,"template":700},"how-we-increased-our-release-velocity-with-gitlab","content:en-us:blog:how-we-increased-our-release-velocity-with-gitlab.yml","How We Increased Our Release Velocity With Gitlab","en-us/blog/how-we-increased-our-release-velocity-with-gitlab.yml","en-us/blog/how-we-increased-our-release-velocity-with-gitlab",{"_path":3509,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3510,"content":3514,"config":3520,"_id":3522,"_type":14,"title":3523,"_source":16,"_file":3524,"_stem":3525,"_extension":19},"/en-us/blog/how-we-supercharged-gitlab-ci-statuses-with-websockets",{"config":3511,"ogTitle":3512,"ogDescription":3513,"title":3512,"description":3513},{"noIndex":6},"How we supercharged GitLab CI statuses with WebSockets","Learn how we reduced API calls on GitLab's CI job status updates from 45 million to 3.4 million calls per day.",{"title":3512,"description":3515,"authors":3516,"heroImage":1281,"date":3517,"body":3518,"category":693,"tags":3519},"Learn how we reduced API calls on GitLab's CI job status updates from 45 million to 3.4 million calls per day. Users now see job status changes instantly instead of waiting up to 30 seconds.",[3479],"2025-09-15","We just reduced API calls by 92.56% on GitLab's CI job status updates – from 45 million to 3.4 million calls per day. 
Instead of needing to wait up to half a minute, users now see job status changes instantly. Here's how we did it.\n\n## The problem: Polling in 2025\nIt's 2025, WebSockets are in and polling is out. Polling is more of a legacy method of getting \"real-time\" updates for software. It's time-driven, meaning clients make network calls to a server on an interval usually between 5 and 30 seconds. Even if the data hasn't changed, those network requests are made to try and get the most accurate data served to the client.\nWebSockets are event-driven, so you only make network requests to the server when the data has actually changed, i.e., a status in a database column changes from `pending` to `running`. Unlike traditional HTTP requests where the client repeatedly asks the server for updates (polling), WebSockets establish a persistent, two-way connection between the client and server. This means the server can instantly push updates to the client the moment something changes, eliminating unnecessary network traffic and reducing latency. For monitoring job statuses or real-time data, this is far more efficient than having clients poll the server every few seconds just to check if anything is different. \n## The transformation\nPreviously, the job header on the job log view was utilizing polling to get the most recent status for a single job. That component made a network request every 30 seconds no matter what to try and get the true state of the job.\n![Job header on job log view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1757932872/b4zsw0zaasxnu5mm7szu.png)\n\nOur metrics showed that:\n* **547,145** network calls happened per 15 minutes\n* **45,729,530** network calls happened per 24 hours\nUsers experienced frustrating delays seeing status updates, and we were hammering our database.\n## Enter GraphQL subscriptions\n\nIn comes GraphQL subscriptions with WebSockets. 
GraphQL subscriptions are a feature that extends GraphQL beyond simple request-response queries and mutations, allowing clients to maintain a real-time connection to the server. While regular GraphQL queries fetch data once and return it, subscriptions let you say 'notify me whenever this specific data changes.' Under the hood, GraphQL subscriptions typically use WebSockets to maintain that persistent connection. Here's what we did:\n1. First, we refactored the job header component to use GraphQL for its data\n2. Then we implemented a GraphQL subscription to serve real-time updates with ActionCable (Rails' WebSocket framework).\n## The results\nAfter this implementation, our users now get truly real-time accurate job status – updates appear instantly when jobs change state. The performance gains are remarkable:\n* **92.56% reduction** in API calls for this component\n* Now averaging **39,670** network calls per 15 minutes (down from 547,145)\n* Only **3,403,395** network calls per 24 hours (down from 45,729,530)\nWe also monitored CPU utilization and operation rate per command over the last week and have not seen any significant increase on our services. Win-win for the software and the team.\n## What's next\nThis is just the beginning. We're working on making every CI status in the GitLab product real-time. Currently, many parts of GitLab's UI still rely on polling to check for updates. Our goal is to systematically replace these polling mechanisms with GraphQL subscriptions, giving users instant feedback across the entire CI/CD workflow.\nWant to see this capability in action? Check out any job log view and watch those status updates fly. Not a GitLab user yet? 
[Try GitLab Ultimate with GitLab Duo Enterprise](https://about.gitlab.com/free-trial/devsecops/) for free for 30 days.",[693,695,9],{"featured":6,"template":700,"slug":3521},"how-we-supercharged-gitlab-ci-statuses-with-websockets","content:en-us:blog:how-we-supercharged-gitlab-ci-statuses-with-websockets.yml","How We Supercharged Gitlab Ci Statuses With Websockets","en-us/blog/how-we-supercharged-gitlab-ci-statuses-with-websockets.yml","en-us/blog/how-we-supercharged-gitlab-ci-statuses-with-websockets",{"_path":3527,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3528,"content":3534,"config":3541,"_id":3543,"_type":14,"title":3544,"_source":16,"_file":3545,"_stem":3546,"_extension":19},"/en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"title":3529,"description":3530,"ogTitle":3529,"ogDescription":3530,"noIndex":6,"ogImage":3531,"ogUrl":3532,"ogSiteName":685,"ogType":686,"canonicalUrls":3532,"schema":3533},"How we use GitLab at the Province of Nova Scotia","The Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and CI/CD. 
Here's how we started exploring DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670226/Blog/Hero%20Images/how-we-use-gitlab-at-nova-scotia.jpg","https://about.gitlab.com/blog/how-we-use-gitlab-at-the-province-of-nova-scotia","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab at the Province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-07-18\",\n      }",{"title":3529,"description":3530,"authors":3535,"heroImage":3531,"date":3538,"body":3539,"category":718,"tags":3540},[3536,3537],"Steven Zinck","Paul Badcock","2017-07-18","\n\nIn 2015 the Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and [Continuous Integration and Continuous Deployment](/solutions/continuous-integration/). This was the beginning of our foray into DevOps practices. This article describes our automated testing, integration and release of Puppet code.\n\n\u003C!-- more -->\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/devops-infinity-graphic.png){: .shadow}\u003Cbr>\n\nYou can also learn more about our DevOps transformation by watching our recent interview:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n### Source control\n\nA source control management (SCM) system allows the user to “commit” code, documentation and other system artifacts such as configuration files to a central location. Each change results in a new version of the file, and previous versions of the file remain available on the SCM. 
Restoring a previous version is quick and easy.\n\nWe needed a way for multiple sysadmins to be able to work on code without colliding with one another. We also needed a way to vet changes through a peer review process. GitLab makes this easy thanks to its support of branching and merge requests. Branching allows a sysadmin to create an individual copy of the production code (“master”) and work with it in isolation — this allows multiple team members to be working on the same production code base without being concerned about conflicts between their work.\n\n### Continuous integration\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-cd-workflow.png){: .shadow}\u003Cbr>\n\nAs we built out more of our infrastructure with Puppet, we needed an automated way of testing our code. Over time, our test strategy has evolved to include automated [syntax checking](https://puppet.com/blog/verifying-puppet-checking-syntax-and-writing-automated-tests), [linting](http://puppet-lint.com/), [unit](https://puppet.com/blog/unit-testing-rspec-puppet-for-beginners) and [integration](http://serverspec.org/) tests. Manual testing was not sufficient, as it was often forgotten about and was very time consuming. Automated testing solved that — for every code commit, the test pipeline is executed. A complete test cycle currently takes under five minutes.\n\nOn each code commit to a branch other than master, the following test pipeline is kicked off by GitLab CI:\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-screenshot.png){: .shadow}\u003Cbr>\n\nIf at any point a job fails, the pipeline stops and the sysadmin is notified. One of the great features of GitLab CI is its tight integration with Docker — each of the jobs above is run inside its own isolated container. 
The syntax-lint-spec job verifies that the Puppet syntax is good; linting confirms the code conforms to best practices; and spec confirms that logically the code functions as designed.\n\nThe test-kitchen jobs are a full suite of [ServerSpec](http://serverspec.org/) tests. We automatically provision four containers that represent our four most common configurations. Our Puppet code is applied to each container to verify that it will work in our production environment. This acts as a full regression test each time a code commit is made, and ensures that there were no unintended problems introduced. It gives us confidence that the code is actually doing what it’s intended to do.\n\n### Continuous deployment\n\nOnce all of the tests pass, the sysadmin can submit a merge request for their branch, and it will be reviewed by a senior staff member before reaching production. This is an important part of our workflow, because it gives junior staff the confidence that a more senior member of the team will review and approve a change before it reaches any of our servers. If the merge request is accepted, the branch will be merged into master and at that point GitLab CI will push the code to our Red Hat Satellite and Puppet Enterprise servers where it will be deployed to our environment.\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/cd-screenshot.jpeg){: .shadow}\u003Cbr>\n\nYou can find the configuration files (Dockerfiles, .kitchen.yml, .gitlab-ci.yml and Satellite push script) at our [GitHub](https://github.com/nsgov).\n\nThe implementation of our system automation strategy and the toolset we selected has proven itself many times. 
We are spending less time fighting fires due to the streamlined and tested nature of our deployments and have earned the confidence of our clients.\n\n### The road ahead\n\nIn upcoming articles, we’ll write about the CI/CD process we built with [Communications Nova Scotia](https://novascotia.ca/cns/) that allows their development team to deploy and roll back their Dockerized application environment on demand. We also plan to write about our automated test strategy for Red Hat Ansible.\n\nThis post originally appeared on [*Medium*](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f).\n\n## About the Guest Authors\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his career working in the Public Service as a Unix and Infrastructure administrator. Over the past few years, he's started to transition away from traditional systems administration and begun to focus on software delivery and automation. As part of that transition, his team has implemented GitLab at the core of our automation and software delivery stack. His current focus is working with software and application teams to assist in streamlining their deployment and delivery process.\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working in the IT sector in 1998 with positions in small startups, to large fortune 500 companies, to currently on a public-sector team. His career was focused as a traditional IT Linux administrator until in the mid-2000s he started focusing on adopting development tooling, practices and methodologies for operational teams. This work culminated in implementing an early 2010s DevOps workplace framework with the help of @stewbawka and subsequently working with like-minded teams since. 
As a part of adopting developer tools he has previously worked with and managed CVS, SVN installations and various vendor products before reading a “Show HN” posting on Hacker News about GitLab.\n",[721,9,875],{"slug":3542,"featured":6,"template":700},"how-we-use-gitlab-at-the-province-of-nova-scotia","content:en-us:blog:how-we-use-gitlab-at-the-province-of-nova-scotia.yml","How We Use Gitlab At The Province Of Nova Scotia","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia.yml","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"_path":3548,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3549,"content":3555,"config":3561,"_id":3563,"_type":14,"title":3564,"_source":16,"_file":3565,"_stem":3566,"_extension":19},"/en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"title":3550,"description":3551,"ogTitle":3550,"ogDescription":3551,"noIndex":6,"ogImage":3552,"ogUrl":3553,"ogSiteName":685,"ogType":686,"canonicalUrls":3553,"schema":3554},"How we use GitLab to automate our monthly retrospectives","How one engineering team is using GitLab CI to automate asynchronous retrospectives, making collaboration across four continents a breeze.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670529/Blog/Hero%20Images/automate-retrospectives.jpg","https://about.gitlab.com/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab to automate our monthly retrospectives\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-03-07\",\n      }",{"title":3550,"description":3551,"authors":3556,"heroImage":3552,"date":3558,"body":3559,"category":718,"tags":3560},[3557],"Sean McGivern","2019-03-07","\n\nAs an [Engineering\nManager] at GitLab I spend most of\nmy working day using GitLab for a variety of tasks – from 
using [issue boards](/stages-devops-lifecycle/issueboard/) for team assignments, [epics](https://docs.gitlab.com/ee/user/group/epics/) for tracking longer-term initiatives, and [todos](https://docs.gitlab.com/ee/user/todos.html) and notifications to manage my own workflow.\n\nWe also use GitLab in a number of unconventional ways, so I wanted to share with you one interesting use case we've been experimenting with.\n\n[Engineering Manager]: /handbook/engineering/management/\n\n## GitLab stage group retrospectives\n\nEach [stage group](/stages-devops-lifecycle/) at GitLab has its [own retrospective], which then feeds into the\n[GitLab-wide retrospective] we have for each monthly release.\n\n[own retrospective]: /handbook/engineering/management/group-retrospectives/\n[GitLab-wide retrospective]: /handbook/engineering/workflow/#retrospective\n\nThe [Plan team](/handbook/engineering/development/dev/plan/) is fairly widely\ndistributed: we have people on four continents, and only two members of the team\nare even in the same country as each other. We wanted to try [asynchronous\ncommunication] wherever possible, so we used GitLab issues for [our\nretrospectives], too.\n\nA quick note on terminology: we say [team] to refer to a manager – like me – and\ntheir reports. We say [stage group] to refer to the people who work on a\nparticular [DevOps stage], even across multiple teams. 
The Plan stage group is\neven more widely distributed.\n{: .note}\n\n[team]: /company/team/structure/#team-and-team-members\n[stage group]: /company/team/structure/#stage-groups\n[DevOps stage]: /handbook/product/categories/#devops-stages\n[asynchronous communication]: /handbook/communication#internal-communication\n[our retrospectives]: https://gitlab.com/gl-retrospectives/plan/issues?label_name[]=retrospective\n\n## Automating retrospective issue creation\n\nCreating the retrospective issue was fast, but adding links to notable\nissues that we shipped or that slipped was time consuming and\ntedious. In the spirit of [xkcd 1319], I decided to automate it, so I\ncreated the [async-retrospectives] project. This project makes\nretrospective issue creation a hands-off process:\n\n[xkcd 1319]: https://xkcd.com/1319/\n[async-retrospectives]: https://gitlab.com/gitlab-org/async-retrospectives\n\n1. It uses [scheduled pipelines] to create an issue on the 1st of each\n   month. As our [development month] runs from the 8th to the 7th, this\n   is a little early, but it allows the team to jot down any thoughts\n   they have while they are still working on the release.\n\n   ![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/scheduled-pipelines.png){: .shadow}\n2. The issue is created using the standard [GitLab API], using a [protected\n   variable] to hold the credentials.\n3. When we create the issue, we use [quick actions] to add the correct\n   labels and due date in a convenient way. (This is also possible\n   without quick actions, but quick actions are more convenient for me\n   personally.)\n4. 
Another scheduled pipeline runs on the 9th of each month to update\n   the existing issue's description with the lists of issues (slipped,\n   shipped) I mentioned above.\n\n   We make our retrospectives public after we conclude them, so you can see this\n   in action on the [11.8 Plan retrospective]:\n\n   [![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/11-8-plan-retrospective.png){: .shadow}][11.8 Plan retrospective]\n\n[scheduled pipelines]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[development month]: /handbook/engineering/workflow/#product-development-timeline\n[GitLab API]: https://docs.gitlab.com/ee/api/\n[protected variable]: https://docs.gitlab.com/ee/ci/variables/#protected-variables\n[quick actions]: https://docs.gitlab.com/ee/user/project/quick_actions.html\n[11.8 Plan retrospective]: https://gitlab.com/gl-retrospectives/plan/issues/22\n\nI only intended this for use in Plan, but a nice thing about a company where we\n[give agency] to people to solve their problems is that people like me are able\nto try out things that might not work globally, like this.\n\nAs it happened, it's also been [picked up by other teams and groups]. We\nconfigure the creation in a [YAML file], just like GitLab CI is configured, to\ntry to make it as easy as possible for other managers to contribute and set this\nup for their team.\n\n[give agency]: https://handbook.gitlab.com/handbook/values/#give-agency\n[picked up by other teams and groups]: https://gitlab.com/gitlab-org/async-retrospectives/merge_requests?state=merged\n[YAML file]: https://gitlab.com/gitlab-org/async-retrospectives/blob/master/teams.yml\n\n## Our experience running asynchronous retrospectives\n\n### What works\n\nWe've had a lot of positive experiences from these asynchronous\nretrospectives. In particular:\n\n1. No one is disadvantaged because of their time zone. 
If we had a video call\n   with our time zone spread, we'd have some people on that call in the middle of\n   their night, or missing out completely.\n2. Because they are written down from the start, and because comments in GitLab\n   are linkable, we can very easily refer to specific points in the future.\n3. Also, because they are written down, the comments can include links to\n   specific issues and merge requests to help other people get the same context.\n\n### What needs improvement\n\nAsynchronous retrospectives aren't perfect, of course. Some of the downsides\nwe've noticed are:\n\n1. Video calls are simply better for some things. In particular, the discussion\n   does not flow as smoothly in text as it can in a verbal conversation.\n\n   We also conduct our [engineering-wide retrospective] in a [public video\n   call], so we retain some opportunity for synchronous discussion.\n2. Similarly, team bonding is slower in text than in video calls.\n3. Participation can be lower if it's something you don't have to do right now,\n   but can always defer to a later date. We are continually [looking for ways to improve\n   this].\n\nOver all, we don't intend to go back to video calls for retrospectives,\nand we're really happy with the results. 
You can see all public\nretrospectives from the teams and groups at GitLab in the [GitLab\nretrospectives group on GitLab.com].\n\n[engineering-wide retrospective]: https://docs.google.com/document/d/1nEkM_7Dj4bT21GJy0Ut3By76FZqCfLBmFQNVThmW2TY/edit\n[public video call]: /2017/02/14/our-retrospective-and-kickoff-are-public/\n[looking for ways to improve this]: https://gitlab.com/gitlab-org/async-retrospectives/issues/12\n[GitLab retrospectives group on GitLab.com]: https://gitlab.com/gl-retrospectives\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,873,1064,875],{"slug":3562,"featured":6,"template":700},"how-we-used-gitlab-to-automate-our-monthly-retrospectives","content:en-us:blog:how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","How We Used Gitlab To Automate Our Monthly Retrospectives","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"_path":3568,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3569,"content":3575,"config":3583,"_id":3585,"_type":14,"title":3586,"_source":16,"_file":3587,"_stem":3588,"_extension":19},"/en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics",{"title":3570,"description":3571,"ogTitle":3570,"ogDescription":3571,"noIndex":6,"ogImage":3572,"ogUrl":3573,"ogSiteName":685,"ogType":686,"canonicalUrls":3573,"schema":3574},"GitLab Runner Fleet dashboard improved through user research","Learn how GitLab user research drives the product development process when enabling more runner fleet 
features.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666543/Blog/Hero%20Images/lightvisibility.png","https://about.gitlab.com/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How user research transformed GitLab Runner Fleet dashboard visibility and metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gina Doyle\"}],\n        \"datePublished\": \"2023-11-07\",\n      }",{"title":3576,"description":3571,"authors":3577,"heroImage":3572,"date":3579,"body":3580,"category":718,"tags":3581},"How user research transformed GitLab Runner Fleet dashboard visibility and metrics",[3578],"Gina Doyle","2023-11-07","\nContinuous integration and continuous deployment (CI/CD) are a crucial part of the product development workflow. Companies depend on CI/CD to get new software features, bug fixes, and improvements out the door quickly. At GitLab, runners are at the core of CI/CD and are needed to build, test, and deploy code. [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner) is the open source project that is used to run CI/CD jobs and send the results back to GitLab. However, since GitLab's early years, GitLab Runner has been code-centric with limited UI capabilities. We recently embarked on a journey to change that – follow along to see how we gathered user input and made desired improvements to the visibility and metrics of the GitLab Runner Fleet dashboard.\n\n## Managing runners\nAs GitLab scaled as a company, so did the number of GitLab users with complex and evolving use cases. In the past five years, we have seen a radical increase in the need for a best-in-class experience when managing a large number of self-managed runners. 
This need has led us to put more time and focus into improving how GitLab manages runners and how it supports users in making decisions quickly and effectively.\n\nTo that end, we’ve been making incremental changes to the runner fleet management experience, including improving the general usability of admin and group runner pages, providing more data around runners such as jobs run and status checks, and improving the runner creation process so it’s more secure and easier to follow. By doing this, we built a better underlying system so we could add new features easily.\n\nHowever, runner admins and platform engineers shared this recurring problem with us: \n- It is difficult to get an at-a-glance view of my fleet of runners, including how they are performing (how fast they pick up jobs, which ones are running the most jobs, etc.) and what issues (if any) are present that need to be fixed. \n\nIn addition to this problem, the GitLab Runner Fleet team was also running into issues with the performance of runner pages and with scalability when trying to add new features. This was a perfect opportunity to learn more about the problem users were facing and to innovate to extend our runner offering.\n\n## Gathering insights and exploring proposals\nTo fully understand the problem at hand and help make the requirements more clear, we carried out [problem validation](https://about.gitlab.com/handbook/product/ux/ux-research/problem-validation-and-methods/) research. We held [moderated in-depth interviews](https://www.usability.gov/how-to-and-tools/methods/individual-interviews.html) and sifted through much of our existing data from previous interviews. 
As we gained confidence in our understanding of the problem, we created a first iteration of the design to be tested with users through [moderated usability testing](https://about.gitlab.com/handbook/product/ux/ux-research/usability-testing/#different-types-of-usability-testing), which would [determine whether the solution really did solve the problem](https://about.gitlab.com/handbook/product/ux/ux-research/solution-validation-and-methods/).\n\nThis first design proposal focused on: \n- a general overview of the fleet, broken down by types (instance, group, project runners) and status\n- visibility into runner system failures\n- a general concept of runner load - how many jobs are running at once out of how many possible jobs the runner can run?\n- how long it takes for runners to pick up jobs\n- a list of runner events (job failures, status changes, upgrades, etc.)\n\n![Initial design of dashboard 1](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/initial-design-1.png)\n\n\n![Initial design of dashboard 2](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/initial-design-2.png)\n\n\n## Testing the usability of iteration\nWe ran moderated usability testing sessions so we could measure user responses and satisfaction based on a set of consistent questions across multiple participants. We used a Figma prototype and had participants complete tasks that connected back to the problem we were solving. \n\nAn advantage of running moderated sessions compared to unmoderated sessions is that we could tailor our follow-up questions as required once participants completed a task or provided an answer. After completing these sessions, we summarized the data we received into the following key insights to create the MVC (minimal viable change) of the runner fleet dashboard:\n1. 
Runner failures/errors are crucial to identify problems (voted the most important feature on the dashboard).\n2. Online and offline runners matter the most in terms of status breakdowns for a fleet.\n3. Visibility into busy runners (tied for second most important feature on the dashboard) helps users see individual runner load.\n4. Wait time to pick up a job was tied for the second most important feature on the dashboard and seeing this over time with more configuration options can help identify where to make optimizations in the fleet.\n\nThere are many other features requested by participants that should be handled in follow-up iterations of the dashboard. See [this epic](https://gitlab.com/groups/gitlab-org/-/epics/10631) for more information.\n\n## Updating the designs\nOur next step was to update the designs to consider the research we ran.\n\n### Responding to feedback\n\n1) Wait times\n\n**What we heard:**\n- “Right now, there is very little information available as to how soon a CI build might start. Oftentimes, users are left wondering why jobs won’t run.” \n- “It's mostly reactive for us at this point anyway when, as you know, we get users reporting problems, we might want to go look at wait times here. And be able to dig down on those to see who's waiting...”\n\n**What we did:**\n- Added an in-depth visualization of wait times for all instance runners in the fleet in the past three hours and included percentiles to give users a true representation of the wait times. 
By providing the data over this interval, we enable runner admins to quickly get a sense of how their runners are performing and if there are any issues with the fleet that would cause jobs to stay in pending state.\n\n![Wait time graph](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/wait-time-graph.png)\n\n2) Runner loads\n\n**What we heard:**\n- “I have three build servers that are shared amongst many projects and in order for me to ensure each build server is properly set up, it's important for me to track builds by server. So, if one particular server is having issues, I need to be able to focus on that server.”\n\n**What we did:**\n- To start indicating some data on runner load, we’ve added a list of the top five busiest runners based on the number of running jobs they have at the moment, ranked from highest to lowest. This should help when analyzing concurrency settings and seeing if runners really need the capacity set for them.\n\n![Active runners](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/active-runners.png)\n\n3) Understanding of most recent failures\n\n**What we heard:**\n- “We actually have a dashboard on Datadog that gives us error counts and errors coming from the runners themselves. But you know, without a dashboard, we have no visibility on anything inside of GitLab, like queue lengths or wait times or anything like that.”\n\n- “Our setup is not perfect...some of the runners run on spot instances and can disappear, which means the background engine can die. You get this very strange error that the job failed because of something and we need to retry the job using a different runner.”\n\n**What we did:**\n- Created a list of most recent failures in the last hour for instance runners. 
Not only can you quickly navigate to the job log and details, but you’re also given a short summary of the error so you get insight into it immediately and can get on your way to fix it.\n\n![Runner failures](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/runner-failures.png)\n\n**The full dashboard:**\n\n![Full runner dashboard](https://about.gitlab.com/images/blogimages/2023-11-01-how-we-used-research-to-provide-visibility-into-runner-fleets/full-dashboard.png)\n\n## What's next?\nThis first iteration of the dashboard is not the end. We have many iterations planned to improve the dashboard over the next year. To first get feedback on how it works for users, we will run an [Early Adopters Program](https://gitlab.com/groups/gitlab-org/-/epics/11180) for GitLab Ultimate self-managed users. We will work with teams to set up the feature on their instance and continuously ask for feedback once it is being used. This will also help us understand user satisfaction levels and help our team prioritize fixes and new features as we continue improving the experience.\n\n**Do you want to provide feedback now?** We would love to hear what you think! Please add your thoughts about the Fleet Dashboard to [this feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/421737). 
To learn more about how we built this dashboard, [watch this technical demo](https://www.youtube.com/watch?v=clyfLsss-vM) by Miguel Rincon, Pedro Pombeiro, and Vladimir Shushlin.\n",[9,2312,3582],"design",{"slug":3584,"featured":6,"template":700},"how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics","content:en-us:blog:how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics.yml","How We User Research Transformed Gitlab Runner Fleet Dashboard Visibility And Metrics","en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics.yml","en-us/blog/how-we-user-research-transformed-gitlab-runner-fleet-dashboard-visibility-and-metrics",{"_path":3590,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3591,"content":3597,"config":3602,"_id":3604,"_type":14,"title":3605,"_source":16,"_file":3606,"_stem":3607,"_extension":19},"/en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"title":3592,"description":3593,"ogTitle":3592,"ogDescription":3593,"noIndex":6,"ogImage":3594,"ogUrl":3595,"ogSiteName":685,"ogType":686,"canonicalUrls":3595,"schema":3596},"It's time to learn DevOps and here's where to begin","DevOps is a unique blend of tech, tools and culture. Take it step-by-step and it's easy to learn. This simple guide shows you how to get started. 
Learn more here!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663743/Blog/Hero%20Images/three-things-i-learned-in-my-first-month-at-gitlab.jpg","https://about.gitlab.com/blog/if-its-time-to-learn-devops-heres-where-to-begin","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to learn DevOps and here's where to begin\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-03-10\",\n      }",{"title":3592,"description":3593,"authors":3598,"heroImage":3594,"date":3599,"body":3600,"category":741,"tags":3601},[738],"2022-03-10","\n\nIf you’re fairly new – or really new – to a DevOps team, you’ve made a great career move, but you probably [have a lot to learn](/topics/devops/devops-beginner-resources/). To truly learn DevOps, there are technologies and processes to figure out, phases to understand, and a [whole new mindset to adopt](/blog/soft-skills-are-the-key-to-your-devops-career-advancement/). \n\n## Learn DevOps, where to start?\n\nLearn DevOps? Why? Where?... Since the demand for DevOps professionals is hot and salaries for this [dynamic job sector](/blog/four-tips-to-increase-your-devops-salary/) are on the rise, there are a lot of DevOps beginners trying to figure out what to learn first. But don’t worry: We can help. \n\nWith a lot on [your learn DevOps to-do list](https://learn.gitlab.com/beginners-guide-devops/guide-to-devops), we’ll walk you through where you should start, including figuring out what DevOps is all about, the stages of the DevOps lifecycle, and the uniquely [collaborative culture](/blog/engineering-teams-collaborating-remotely/). \n\n## What DevOps is really all about\n\nIn the past, software development was done using a complicated and confusing jumble of tools and workflows. 
Both projects and teams often were siloed, which meant they weren’t coordinating efforts or sharing best practices. It was a frustrating and inefficient process that led to deployment traffic jams, costing teams time and money. There were a lot of headaches.\n\nThink of DevOps as a way to simplify development and deployment, while making the entire process more efficient. With DevOps, once-siloed teams, tools, and workflows are combined in a software development ecosystem. That ecosystem enables teams to plan, create and deliver more efficiently, securely, and collaboratively. \n\n## What to learn for DevOps\n\nDevOps also puts a focus on automation, shifting security left, and making practices not only repeatable but measurable. That speeds development cycles and slashes the time between designing new features and rolling them out into production.\n\nBecause of this efficiency and the enablement of teamwork, DevOps makes not only your software delivery more agile, it makes your entire company more agile. DevOps enables the business to pivot quickly, answering new and critical customer needs, responding to changes in the market and adjusting to stay ahead of the competition. \n\n## To learn DevOps, collaborate\n\nDevOps is built around a culture of collaboration that encourages teammates to share ideas and help each other. It’s not simply something that’s suggested and it’s not something that’s done in a meeting or two. Collaboration is a [core principle](/blog/4-must-know-devops-principles/) of DevOps. \n\nIt's easy to think that to learn DevOps means focusing on programming languages, security, and CI/CD. Those skills and technologies are critical but don’t dismiss the idea of collaboration. It’s about communication, and working together to create something new and to fix problems. However, DevOps professionals also collaborate with other departments, like security, marketing, and the C-suite. 
You’re all pulling in the same direction.\n\nIn the [2021 Global DevSecOps Survey](/developer-survey/), survey respondents consistently said communication and collaboration skills were key to their future careers. \n\n## The key stages of the DevOps lifecycle\n\nThere’s a definite flow to DevOps, with the process moving from planning and developing all the way through to deployment, monitoring, and feedback. There are three basic stages, or phases – build, test, and deploy. Within these are nine other stages that will help you produce software efficiently, reliably, and with speed and agility.\n\n- Planning focuses on everything that happens before a single line of code is written.\n- Creating is about designing and developing.\n- Verifying checks the quality of the code.\n- Packaging applications and dependencies, managing containers, and building artifacts maintains a consistent software supply chain. \n- Release, or deployment, is all about moving code updates into production as iterations are ready.\n- Configuring is focused on creating, managing, and maintaining application environments.\n- Monitoring is about checking the status of software and networks.\n- Protecting is all about securing your applications and their environment.\n- Managing runs end-to-end through your software development lifecycle, controlling permissions and processes. \n\n## What it means to shift security left\n\nDid you notice that security wasn’t one of the lifecycle stages for DevOps? Well, it’s not a single stage because it’s woven into EVERY stage. Shift left means you don’t wait to incorporate security into software at the end of a build. You consider security beginning with the initial planning stage and continue to focus on it all the way through, giving you more opportunity to avoid or find and address any issues. 
Shifting left enables you to make sure the code you are developing functions as intended, and that any vulnerabilities and compliance issues are caught and fixed.\n\n## Understand CI/CD\n\nFirst off, CI/CD means continuous integration and continuous delivery. Combined continuous development methodologies and practices focus on catching vulnerabilities and errors early in the development lifecycle, ensuring that all the code deployed into production complies with standards the DevOps team has established for the software being created. This helps connect development and operations teams, as well as projects, by using automation for building, testing, and deployment. \n\nCI/CD is all about  incremental code changes being made frequently and reliably – a critical part of how a DevOps platform enables an organization to automatically deliver software multiple times a day. This is key for DevOps teams and the overall business because CI/CD helps to quickly and efficiently move software updates into production, making the organization able to respond faster to customer needs. \n\n## How to get started with DevOps: dig deeper\n\nWant to learn more? 
Our [Beginner's guide to DevOps](https://page.gitlab.com/resources-ebook-beginners-guide-devops.html) has everything you need to get started.\n",[721,9,873],{"slug":3603,"featured":6,"template":700},"if-its-time-to-learn-devops-heres-where-to-begin","content:en-us:blog:if-its-time-to-learn-devops-heres-where-to-begin.yml","If Its Time To Learn Devops Heres Where To Begin","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin.yml","en-us/blog/if-its-time-to-learn-devops-heres-where-to-begin",{"_path":3609,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3610,"content":3616,"config":3622,"_id":3624,"_type":14,"title":3625,"_source":16,"_file":3626,"_stem":3627,"_extension":19},"/en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"title":3611,"description":3612,"ogTitle":3611,"ogDescription":3612,"noIndex":6,"ogImage":3613,"ogUrl":3614,"ogSiteName":685,"ogType":686,"canonicalUrls":3614,"schema":3615},"Inside the improved CI logs management experience for multi-line commands","Reviewing log output for CI/CD jobs with multi-line commands is now easier than ever. 
Find out why, how to configure your pipelines, and what's ahead.\n\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099499/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099498739.jpg","https://about.gitlab.com/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the improved CI logs management experience for multi-line commands\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Romuald Atchadé\"}],\n        \"datePublished\": \"2024-01-25\",\n      }",{"title":3611,"description":3612,"authors":3617,"heroImage":3613,"date":3619,"body":3620,"category":718,"tags":3621},[3618],"Romuald Atchadé","2024-01-25","Improving the GitLab CI/CD log experience for jobs with multi-line commands\nhas been a long-requested feature. With the latest release of GitLab and\nGitLab Runner, it's now easier to work with the log section for jobs with\nmulti-line commands. In this post, we will describe the experience with the\nnew feature, show you how to enable the new log output in your pipelines,\nand discuss key points regarding CI/CD script execution and log output in\nvarious shells, such as Bash and Powershell.\n\n\n## Overview of multi-line commands\n\n\nFirst, it’s helpful to describe what we mean by a CI job with multi-line\ncommands. In GitLab CI the script keyword is used to specify commands to\nexecute for a CI job. In the example below, the build-job has a single\ncommand, a basic echo statement, to execute in the script block. 
\n\n\n```\n\n## A pipeline with a single line command in the script block for the\nbuild-job\n\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"this is the script to run for the build job\"\n\n```\n\n\nIf you were to run this pipeline, then the log output in the UI would\ndisplay as follows:\n\nLine 17 - GitLab CI automatically generates a log entry for the command that\nyou specify in the script block.\n\nLine 18 - This is the output of the command that was executed.\n\n\n![Ci log management - image\n2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099524655.png)\n\n\nNow as you can imagine, the script that you define in the script block will\nlikely be more complex than the example provided and could very well span\nmultiple lines in the CI/CD pipeline file. \n\n\n```\n\n## A pipeline with a multi-line command in the script block for the\nbuild-job\n\n\nbuild-job:\n  stage: build\n  script:\n       - |\n         echo \"this is a multi-line command\"  # a simple echo statement\n         ls  \n\n```\n\n\nIf you were to run this pipeline, then the log output in the UI would\ndisplay as follows:\n\n\nLine 17 - As in the previous example, GitLab CI automatically generates a\nlog entry for the command that you specify in the script block. 
You will\nnotice that line 17 only includes the first command in the script block.\nThis makes it more difficult to debug an issue with script execution as you\nwill need to refer back to the source pipeline file to see exactly what\nscript was executed.\n\n\n![CI log management - image\n3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099524656.png)\n\n\n## So what’s new?\n\n\nStarting in GitLab 16.7 and GitLab Runner 16.7, you can now enable a feature\nflag titled FF_SCRIPT_SECTIONS, which will add a collapsible output section\nto the CI job log for multi-line command script blocks. This feature flag\nchanges the log output for CI jobs that execute within the Bash shell.\n\n\n![CI log management - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099524658.png)\n\n\nLine 17: Unlike the previous examples, the first thing you will notice in\nthe screenshot above is that the log entry for the multi-line\ncommand is collapsed by default.\n\n\nSingle-line commands do not display in a collapsible element.\n\n\nFor multi-line scripts the multi-line command is now a collapsible element,\nso now, when you uncollapse the log entry for line 17, then the log will\ndisplay all of the commands that were executed in the script block.\n\n\n![CI log management - image\n1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099524659.png)\n\n\nThere is also the [`custom collapsible\nsection`](https://docs.gitlab.com/ee/ci/jobs/#custom-collapsible-sections)\nfeature, which in combination with this new multi-command output capability\ndoes provide you additional flexibility for displaying log output in the UI.\nHere is how you can use the two features to change the log output. 
\n\n\n```\n\n## A pipeline with a multi-line command in the script block for the\nbuild-job\n\n\nvariables:\n  FF_PRINT_POD_EVENTS: \"true\"\n  FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n  FF_SCRIPT_SECTIONS: \"true\"\n\ncollapsible_job_multiple:\n  stage: build\n  script:\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:my_first_section\\r\\033[0KHeader of the 1st collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:second_section\\r\\033[0KHeader of the 2nd collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - echo -e \"\\033[0Ksection_end:`date +%s`:second_section\\r\\033[0K\"\n    - echo -e \"\\033[0Ksection_end:`date +%s`:my_first_section\\r\\033[0K\"\n\n```\n\n\nIf you were to run this pipeline with the FF_SCRIPT_SECTIONS feature flag\nset to false, then the log output would be as depicted in the following\nscreenshot.\n\n\n![CI log management - image\n5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099524661.png)\n\n\nBut, if you were to run this pipeline with the FF_SCRIPT_SECTIONS feature\nflag set to true, then the log output would be as depicted in the following\nscreenshot.\n\n\n![CI log management - image\n6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099524663.png)\n\n\n## What about other shells?\n\n\nAs of the 16.7 release, the collapsible output section in the CI job log for\nmulti-line command script blocks is only visible for CI/CD jobs that are\nexecuted with the Bash shell. 
CI/CD jobs executed with Powershell is not\ncurrently supported. We plan to add this\n[capability](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4494)\nin a future release. \n\n\n## What are our future plans?\n\n\nA few features are still needed to improve the CI/CD job log output, and the\n`timestamp` for each log line is one of them. This addition will add missing\nfeatures such as command/section duration.\n\n\n> To learn more about GitLab CI/CD features, refer to the official [CI/CD\ndocumentation](https://docs.gitlab.com/ee/ci/index.html). \n\n\n_Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab._\n",[785,786,9,917],{"slug":3623,"featured":91,"template":700},"inside-the-improved-ci-logs-management-experience-for-multi-line-commands","content:en-us:blog:inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","Inside The Improved Ci Logs Management Experience For Multi Line Commands","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"_path":3629,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3630,"content":3636,"config":3641,"_id":3643,"_type":14,"title":3644,"_source":16,"_file":3645,"_stem":3646,"_extension":19},"/en-us/blog/integrating-azure-devops-scm-and-gitlab",{"title":3631,"description":3632,"ogTitle":3631,"ogDescription":3632,"noIndex":6,"ogImage":3633,"ogUrl":3634,"ogSiteName":685,"ogType":686,"canonicalUrls":3634,"schema":3635},"How 
to integrate Azure DevOps repositories with GitLab","How to keep your code in an Azure DevOps repository and run CI/CD with GitLab pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664363/Blog/Hero%20Images/aleksey-kuprikov.jpg","https://about.gitlab.com/blog/integrating-azure-devops-scm-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate Azure DevOps repositories with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-07-09\",\n      }",{"title":3631,"description":3632,"authors":3637,"heroImage":3633,"date":3638,"body":3639,"category":718,"tags":3640},[1835],"2020-07-09","\n\nRecently we’ve been asked by several people if it is possible to integrate between Azure DevOps/VSTS (Visual Studio Team Services) source code management and GitLab. They are looking for a modern [CI/CD solution](/topics/ci-cd/) like GitLab, but as part of a gradual transition they still need to keep managing their code in Azure DevOps/VSTS. \n\n## Does Azure DevOps integrate with GitLab?\n\nYes, Azure DevOps Services does integrate with GitLab.\n\nAlthough we of course recommend using GitLab CI/CD together with our built-in GitLab SCM, this integration of Azure DevOps source code management and GitLab makes it possible to migrate slowly from Azure DevOps by leaving your code in the Azure [DevOps](/topics/devops/) repository while you adopt GitLab CI/CD. This integration is possible with both the self-managed and SaaS versions of GitLab. The integration works only with Azure DevOps/VSTS git version control. TFVC (Team Foundation Version Control) isn’t supported. 
\n\n### In GitLab, there are two features that enable this integration:  \n\n[GitLab CI/CD for external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) \n\n[Remote repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html)\n\nWhat is a repository in DevOps?\n\nCode repositories in tools like GitLab and Azure exist to house all source code. Sometimes these repositories are referenced as a DevOps “repo” or a source repository. Whatever the title, code repositories provide a place where developers can work to ensure high code quality. \nGitLab uses a [git-based repository](/solutions/source-code-management/) for source code management with version control. It lets GitLab users perform code reviews and easily solve developer issues.\n\n## What is the difference between GitLab and Azure DevOps?\n\nAzure DevOps has a range of services for managing the development lifecycle. Some of its main features include agile planning boards, private git repos for source code management, and Azure pipelines.\n\nGitLab is a single platform for the entire DevSecOps lifecycle and includes the following:\n\n- Planning and collaboration\n- Source code management\n- Code reviews\n- CI/CD pipelines\n- Constant security scanning and monitoring\n- Advanced deployments\n- Vulnerability management\n\nGitLab can help manage the entire DevSecOps lifecycle to deliver software quickly and efficiently while bolstering security and compliance.\n\n## How do I connect to Azure from GitLab?\n\nIt may take some time to fully move over from Azure to GitLab for source code management. To smooth the transition, there are simple steps to connect to the Azure integration from GitLab.\n\n1. Create a new project in GitLab by clicking the New Project button  ![Create new project ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado1.png){: .large.center}\n\n2. Choose the ‘CI/CD for external repo’ tab, and click on Repo by URL.  
![CI/CD for external repo](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado2.png){: .large.center}\n\n3. Open your repository in Azure DevOps and click Clone  ![Getting clone url ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado3.png){: .large.center}\n\n4. Copy the URL. If your repository is private, you will need to generate Git credentials – just click this button and copy the username and password.  ![Credentials](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado4.png){: .large.center}\n\n5. Paste the URL in GitLab under the Git repository URL, give it a name, set the visibility level, and click create project. Add the username and password in case your Azure DevOps repository is private. Note: The repository must be accessible over http://, https:// or git://. When using the http:// or https:// protocols, please provide the exact URL to the repository. HTTP redirects will not be followed.  ![Create project form](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado5.png){: .large.center}\n\n6. Your project is now successfully Mirrored to GitLab. Now branches, tags, and commits will be synced automatically to GitLab. \n\n7. To configure a CI/CD pipeline there are two options:\n\nBefore pushing your first commit, open the CI/CD settings in GitLab and enable Auto DevOps.  It will set the CI/CD configuration, so each commit in Azure Repos will trigger a CI/CD  pipeline in GitLab which will build, test, and deploy your app.  ![Auto DevOps settings](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado6.png){: .shadow.large.center}\n \nAlternatively, in case you want to define the pipeline configuration yourself instead of using the Auto DevOps, add [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/) file to  your repository root directory. The Yaml code should include your [CI/CD definitions](/blog/guide-to-ci-cd-pipelines/). 
Once this file is included in the root directory a CI/CD pipeline will be triggered for each commit. If you are not familiar with .gitlab-ci.yml, start by creating a file with the name .gitlab-ci.yml and paste the below code to it. This code includes build and test stages, and a job that displays text to the console in each stage. Later on you can add additional scripts to each job, and also add additional jobs and stages. To create more complex pipelines, you can [use the pipeline templates](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) that are [shipped with GitLab](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates) instead of starting it from scracth.\n\n```\nstages:\n  - build\n  - test \n  \nbuild:\n  stage: build\n  script:\n    - echo \"Build job\"\n\ntest:\n  stage: test\n  script:\n    - echo \"Test job\"\n```\n\nThat’s it, you are all set! \n\n## Suggested development flow \n\n![Development flow diagram](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado_7_2.png){: .shadow.large.center}\n\n1. CODE (Developer IDE of choice) Developer uses the favorite IDE to develop code, clones the repo to the workstation and creates a branch.  ![Visual Studio Code](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado8.png){: .shadow.large.center}\n\n2. COMMIT (GIT) After the feature is developed/the bug is fixed, the developer pushes the work to the Azure Repository server.  ![Azure DevOps Repos](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado9.png){: .shadow.large.center}\n\n3. BUILD (GitLab) The branch with the commit history will be mirrored to GitLab. The CI/CD pipeline will be triggered. The pipeline will build the code.  ![GitLab pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado10.png){: .shadow.large.center}\n\n    Artifacts will be created, and be available for download.  
![Artifacts](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado11.png){: .shadow.large.center}\n\n    If Auto DevOps is enabled, a container image will be created and be pushed to the built-in Container Registry.  ![GitLab Container Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado12.png){: .shadow.large.center}\n\n    In case a package registry is enabled in the project, packages will be published to the designated package manager.  ![GitLab Package Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado13.png){: .shadow.large.center}\n\n4. TEST (GitLab) Security scans, license scans, and other tests are executed as part of the CI pipeline.  ![GitLab scans](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado14.png){: .shadow.large.center}\n\n5. REVIEW & PULL REQUEST (GitLab & Azure DevOps repos) Review pipeline results in GitLab and if the pipeline passed without errors, and the new change hasn’t introduced new vulnerabilities, the developer creates a pull request in Azure DevOps. A code review is started and the developer might need to make a few changes before merging to master. Each commit will trigger a CI/CD pipeline in GitLab.  ![Azure DevOps pull request](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado15.png){: .shadow.large.center}\n\n6. MERGE (Azure DevOps Repos and GitLab) The Azure DevOps pull request is approved and the branch will be merged to the master branch in the Azure DevOps Repository.\n\nDepending on your pipeline configuration, this merge to the master branch will trigger the CI/CD pipeline in GitLab to validate the merge results, build new packages and container images, and then deploy them.  
![GitLab CI/CD pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado16.png){: .shadow.large.center}\n\n## Development workflow demonstration \n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/HfpP2pEmkoM\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## A solution worth trying \n\nGitLab offers a leading source code management and CI/CD solution in one application which many [GitLab customers](/customers/) use together because of the power of this combination. However, we know that sometimes there are constraints that do not allow teams to migrate their repository to GitLab SCM, at least not right away. For these situations, even if it is only temporary, we offer the capability of GitLab CI/CD for external repositories illustrated here. \n\n\n**Read more about GitLab CI/CD:**\n\n[Forrester report compares between leading CI/CD tools](https://about.gitlab.com/analysts/forrester-cloudci19/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Case Study - how Goldman Sachs improved from 1 build every two weeks to over a thousand per day](https://about.gitlab.com/customers/goldman-sachs/)\n\nCover image by [Aleksey Kuprikov](https://unsplash.com/@alekskuprfilmz) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9,999,917],{"slug":3642,"featured":6,"template":700},"integrating-azure-devops-scm-and-gitlab","content:en-us:blog:integrating-azure-devops-scm-and-gitlab.yml","Integrating Azure Devops Scm And 
Gitlab","en-us/blog/integrating-azure-devops-scm-and-gitlab.yml","en-us/blog/integrating-azure-devops-scm-and-gitlab",{"_path":3648,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3649,"content":3655,"config":3661,"_id":3663,"_type":14,"title":3664,"_source":16,"_file":3665,"_stem":3666,"_extension":19},"/en-us/blog/integrating-with-gitlab-secure",{"title":3650,"description":3651,"ogTitle":3650,"ogDescription":3651,"noIndex":6,"ogImage":3652,"ogUrl":3653,"ogSiteName":685,"ogType":686,"canonicalUrls":3653,"schema":3654},"How open source contributions accelerate GitLab Secure","Community contributions and an open integration framework allows anyone to extend GitLab Secure","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668622/Blog/Hero%20Images/group-rowing-collaboration.jpg","https://about.gitlab.com/blog/integrating-with-gitlab-secure","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How open source contributions accelerate GitLab Secure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2020-10-22\",\n      }",{"title":3650,"description":3651,"authors":3656,"heroImage":3652,"date":3657,"body":3658,"category":783,"tags":3659},[2636],"2020-10-22","\nWhen you think about security you probably imagine locks, gates, and closed systems. This is the more traditional approach to security but modern security is much more open and collaborative. If you want to build the most secure systems, there is nothing better than building those systems in the open. Open security practices allow you to get fast feedback from a broad audience with diverse perspectives, helping you build better more holistic solutions. That's our approach to building [GitLab Secure](/stages-devops-lifecycle/secure/) at GitLab. 
We're leveraging amazing open source security projects, the collective contribution of the wider community, and providing an open integration system for anyone to build on top of GitLab security scanners.\n\n## Shifting left\n\nTraditional security approaches are opaque and late in the development life cycle. Security scans are performed by isolated security experts long after developers write code, often after it's deployed to production. GitLab aims to make security an integrated and continuous process. That's why we've built [GitLab Secure directly integrated into the DevOps life cycle](/solutions/security-compliance/). We are taking security tools and \"shifting left\" to make these tools more accessible to developers earlier in the development life cycle and integrated directly into developers' workflows.\n\n![Traditional Security vs DevSecOps with GitLab](https://about.gitlab.com/images/blogimages/traditional-security-vs-integrated.png)\n\nWe created a detailed survey to learn more about the [2020 DevSecOps Landscape](/developer-survey/#security). The results of the survey indicated that security is still a significant hurdle for most organizations that use DevOps, and show:\n\n- Only 13% of companies give developers access to the results of [application security](/topics/devsecops/) tests\n- Over 42% said testing happens too late in the lifecycle\n- 36% reported it was hard to understand, process, and fix any discovered vulnerabilities\n- 31% found prioritizing vulnerability remediation an uphill battle\n\nThese statistics illustrate why we are building security scanning directly into GitLab with our Secure features. 
We want to provide integrated security tools to broaden access and make it easier for everyone using GitLab to write more secure code.\n\n## Integrating security tools into everyday workflows\n\nGitLab Secure enables accurate, automated, and continuous assessment of your applications and services, allowing users to proactively identify vulnerabilities and weaknesses to minimize security risk. Secure is not an additional step in your development process nor an additional tool to introduce to your software stack. It is woven into your DevOps cycle, which allows you to adapt security testing and processes to your developers (and not the other way around).\n\nToday [GitLab Secure](/stages-devops-lifecycle/secure/) offers support for a variety of security scanning tools including:\n- [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n- [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n- [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n- [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n- [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n- [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/)\n- [API Fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/)\n- [Coverage Fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/)\n\nAll of these tools provide unique approaches to finding security problems. No one tool is best at everything, so we wanted to provide a way to leverage many tools in an integrated way, so you're always getting the most relevant security results. 
Take a look at how GitLab Secure integrates all these tools into common developer workflows on GitLab:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XnYstHObqlA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Democratizing security\n\nWith GitLab Secure, we've laid the foundation for bringing security tools directly into developers' workflows. At GitLab, we believe in a world where [everyone can contribute](/company/culture/#everyone-can-contribute). [Collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) and [transparency](https://handbook.gitlab.com/handbook/values/#transparency) are part of our core values. This approach changes the way we build security features. That's why as part of our [community stewardship promise](/company/stewardship/#promises) we've made all our open source based [SAST scanners available for all users](/releases/2020/08/22/gitlab-13-3-released/#sast-security-analyzers-available-for-all), we offer [open source projects and nonprofits free access to our best features](/solutions/open-source/join/), and we've created a [security scanner integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) to allow anyone to contribute security scan tools. Our entire [product strategy and vision](/direction/secure/) is also open source, so everyone can understand our vision for an integrated, accessible, and democratic approach to security. Together we can build a more open and modern security approach that helps developers everywhere write more secure code.\n\n## Integrate with GitLab Secure\n\nOut of the box, GitLab provides a variety of pre-integrated and actively managed open source security tools, such as [SAST's 16 analyzers](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) that all support automatic language detection to always run the most relevant security tool. 
While GitLab will continue to update and build first-party integrations we wanted to ensure that GitLab contributors and integration partners could easily extend GitLab Secure for third-party tools. Our [open integration framework](https://docs.gitlab.com/ee/development/integrations/secure.html) makes it easy for anyone to leverage all of the [features of GitLab Secure](/pricing/feature-comparison/) with any scanning tool they may want to integrate. You can see all the tools GitLab users have requested support for and even add your own request in our [tracking epic](https://gitlab.com/groups/gitlab-org/-/epics/297).\n\n## Community contributions\n\nWith our open integration framework we've seen members of the [GitLab community](/community/) contribute additional security scanners, help maintain the existing open source scanners we offer and expand the list of supported languages and frameworks we support. Our community contributors are helping every GitLab user have access to more accurate, sophisticated, and relevant security results. 
Here are some recent community contribution highlights:\n\n- [Mobile SAST support via MobSF](https://gitlab.com/gitlab-org/gitlab/-/issues/233777) (contribution by [@williams.brian-heb](https://gitlab.com/williams.brian-heb)) - [GitLab 13.5 Release MVP](/releases/2020/10/22/gitlab-13-5-released/#mvp)\n- [Adding Helm Chart support](https://gitlab.com/gitlab-org/gitlab/-/issues/36755) (contribution by [@agixid](https://gitlab.com/agixid))\n- [Performance improvements to Fuzz testing](https://gitlab.com/gitlab-org/security-products/analyzers/fuzzers/pythonfuzz/-/merge_requests/1) (contribution by [@jvoisin](https://gitlab.com/jvoisin))\n- [Updates to secret detection](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@tnir](https://gitlab.com/tnir))\n- [Dependency scanning buxfixes](https://gitlab.com/gitlab-org/gitlab/-/issues/205172) (contribution by [@fcbrooks](https://gitlab.com/fcbrooks))\n- [Updates to Security Scanner underlying operating systems](https://gitlab.com/gitlab-org/gitlab/-/issues/216781) (contribution by [@J0WI](https://gitlab.com/J0WI))\n- [Contributions for .NET Framework Support](https://gitlab.com/gitlab-org/security-products/analyzers/security-code-scan/-/merge_requests/14) (contribution by [@agixid](https://gitlab.com/agixid))\n- [See the full list of Secure community contributions](https://gitlab.com/gitlab-org/gitlab/-/issues?scope=all&utf8=%E2%9C%93&state=all&label_name[]=Community%20contribution&label_name[]=devops%3A%3Asecure)\n\nThe open source nature of GitLab allows the community to help improve, maintain, and contribute features within GitLab. This is the ultimate value of open source. Even if we don't offer something, you can always extend or modify the behavior of GitLab to accomplish your goal. When compared to closed-source Security vendors, this is a huge benefit. 
The impact these contributions have is massive as GitLab Secure is used by tens of thousands of customers and performs hundreds of thousands of security scans every month. If you are interested in contributing, check out our [contributor program](/community/contribute/) and [contributor documentation](https://docs.gitlab.com/ee/development/contributing/).\n\n## Integration partners\n\nCommunity contributions aren't the only way GitLab Secure is being extended. We have a variety of integration partners who provide security integrations that further expand the suite of security tools available to GitLab users. Check out the [GitLab Security integrations](/partners/#security) our partners offer. If you are a security vendor interested in integrating with GitLab, [join our partner program](/handbook/alliances/integration-instructions/) today.\n\n## Looking ahead\n\nWe've come a long way in the past few years with GitLab Secure and we're not done yet. Our [vision is bold (and open source)](/direction/secure/) and [our investment in security is large](https://internal.gitlab.com/handbook/product/investment/). 
[Security is a team effort](/direction/secure/#security-is-a-team-effort) and we hope you'll join us on our mission to help developers write more secure code.\n\n## Read more about GitLab SAST:\n\n* GitLab [Secure Direction](/direction/secure/)\n* Learn more about [integrating with GitLab Secure](https://docs.gitlab.com/ee/development/integrations/secure.html)\n* View the latest [October 2020 GitLab security trends](/blog/gitlab-latest-security-trends/)\n\nCover image by [Mitchell Luo](https://unsplash.com/@mitchel3uo) on [Unsplash](https://unsplash.com/s/photos/rowing-team)\n{: .note}\n",[9,721,697,268,3660,827],"contributors",{"slug":3662,"featured":6,"template":700},"integrating-with-gitlab-secure","content:en-us:blog:integrating-with-gitlab-secure.yml","Integrating With Gitlab Secure","en-us/blog/integrating-with-gitlab-secure.yml","en-us/blog/integrating-with-gitlab-secure",{"_path":3668,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3669,"content":3675,"config":3680,"_id":3682,"_type":14,"title":3683,"_source":16,"_file":3684,"_stem":3685,"_extension":19},"/en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"title":3670,"description":3671,"ogTitle":3670,"ogDescription":3671,"noIndex":6,"ogImage":3672,"ogUrl":3673,"ogSiteName":685,"ogType":686,"canonicalUrls":3673,"schema":3674},"Interactive: Take a guided tour of the DevSecOps workflow","Explore GitLab's recommended best practices for DevSecOps with a detailed visual depiction of the main steps in the development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668969/Blog/Hero%20Images/blog-image-template-1800x945__1800_x_945_px_.png","https://about.gitlab.com/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Interactive: Take a guided tour of the DevSecOps workflow\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-04-16\",\n      }",{"title":3670,"description":3671,"authors":3676,"heroImage":3672,"date":3677,"body":3678,"category":741,"tags":3679},[1835],"2024-04-16","When engaging in discussions with industry professionals and stakeholders, they quickly grasp the core principles of DevSecOps, which emphasize speed, security, and quality. However, there's often a curiosity about the specific strategies required to achieve optimal speed without compromising security and quality. We created this interactive infographic to showcase GitLab's best practices for [DevSecOps](https://about.gitlab.com/topics/devsecops/) through a detailed visual depiction of the main steps in the development lifecycle.\n\nWalk through every step of the DevSecOps process, including creation of [issues](https://docs.gitlab.com/ee/user/project/issues/), development and pushing of code, [security testing](https://about.gitlab.com/stages-devops-lifecycle/secure/), and deployment to production. 
Each step features a deep dive with additional resources such as demos, blog posts, and documentation.\n\n## Get started with the interactive tour\n\nClick on the image below to access the guided tour, and use the navigation buttons or keyword arrows to easily make your way through the flow.\n\n[![GitLab workflow description](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676928/Blog/Content%20Images/infographic.png)](https://tech-marketing.gitlab.io/static-demos/gitlab-infographic.html)\n\n\u003Cp>\u003C/p>\n\n> > Learn how [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, further enhances the DevSecOps workflow.\n",[696,875,9],{"slug":3681,"featured":91,"template":700},"interactive-take-a-guided-tour-of-the-devsecops-workflow","content:en-us:blog:interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","Interactive Take A Guided Tour Of The Devsecops Workflow","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"_path":3687,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3688,"content":3694,"config":3700,"_id":3702,"_type":14,"title":3703,"_source":16,"_file":3704,"_stem":3705,"_extension":19},"/en-us/blog/introducing-accessibility-testing-in-gitlab",{"title":3689,"description":3690,"ogTitle":3689,"ogDescription":3690,"noIndex":6,"ogImage":3691,"ogUrl":3692,"ogSiteName":685,"ogType":686,"canonicalUrls":3692,"schema":3693},"Introducing Accessibility Testing in GitLab","How Accessibility Testing reinforces our value that everyone can contribute","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666941/Blog/Hero%20Images/accessibility-vision.jpg","https://about.gitlab.com/blog/introducing-accessibility-testing-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing Accessibility Testing in GitLab\",\n   
     \"author\": [{\"@type\":\"Person\",\"name\":\"James Heimbuck\"}],\n        \"datePublished\": \"2020-03-04\",\n      }",{"title":3689,"description":3690,"authors":3695,"heroImage":3691,"date":3697,"body":3698,"category":978,"tags":3699},[3696],"James Heimbuck","2020-03-04","{::options parse_block_html=\"true\" /}\n\n\nGitLab has introduced continuous accessibility testing with the 12.8\nrelease. This post adds some context to the [release\npost](/releases/2020/02/22/gitlab-12-8-released/#automated-accessibility-scanning-of-review-apps)\nand introduces our vision for accessibility testing going forward.\n\n\n## Why isn't everyone doing accessibility testing?\n\n\nOne of the core tenets of the vision of GitLab is that [everyone can\ncontribute](https://about.gitlab.com/company/vision/#vision). We also firmly\nbelieve that GitLab should be accessible to everyone and projects created\nwith GitLab should as well. To better realize this we have introduced\nAccessibility Testing as a new feature in GitLab 12.8.\n\n\nIn talking to developers, and even my own colleagues, I have been surprised\nabout how many people around me are color blind and have realized how easy\nthis is to overlook. In fact, 8% of the United States male population is\ncolor blind, and this is just one kind of visual impairment that may impact\na web site's readability. Take for instance the classic North American color\nindicators for Go / Stop or Good / Bad - Red and Green. An example of the\nGitLab pipeline page is shown below as users without and with protanopia\nwould see the page. 
GitLab does make use of differing icons and wording as\nindicators of a pipeline's status, but the difference is striking.\n\n\n![Pipelines\npage](https://about.gitlab.com/images/blogimages/accessibility_direction/pipelines.png){:\n.shadow}\n\nHow the Pipelines page appears to users without Protanopia\n\n{: .note.text-center}\n\n\n![Pipelines with\nProtanopia](https://about.gitlab.com/images/blogimages/accessibility_direction/pipelines-with-protanopia.png){:\n.shadow}\n\nHow the Pipelines page appears to users with Protanopia\n\n{: .note.text-center}\n\n\nAs we dug into the problem with developers we found two common themes:\n\n1.  Accessibility testing is done late in the development lifecycle, often\non a Release Candidate, when it is too late to make changes.\n\n1.  Accessibility testing is becoming more important across organizations.\n\n\n## Why is accessibility testing a hot topic?\n\n\nPizza. More specifically one persons ability to [customize a\npizza](https://www.usatoday.com/story/money/2019/10/07/dominos-pizza-website-accessibility-supreme-court-wont-hear-case/3904582002/).\nWhile some companies may see accessibility testing as a defensive move to\nprevent law suits we see it a different way at GitLab. We want to build\nsoftware for everyone and we want to build it quickly. Providing developers\na mechanism to improve the accessibility of their software for all users is\njust the right thing to do.\n\n\n## How is GitLab solving this problem?\n\n\nWith the [12.8\nrelease](/releases/2020/02/22/gitlab-12-8-released/#automated-accessibility-scanning-of-review-apps)\nGitLab has introduced accessibility scanning in the Core product. By\nincluding the Accessibility template to your .gitlab-ci.yml file and\nproviding a URL the accessibility scan can be run on every merge request\nthat includes that job. 
\n\n\n```yaml\n\ninclude:\n  template: Verify/Accessibility.gitlab-ci.yml\n\na11y:\n  variables:\n    a11y_urls: https://example.com\n```\n\n\nThe example above is from the\n[documentation](https://docs.gitlab.com/ee/ci/testing/accessibility_testing.html)\nwhich includes some additional details. \n\n\nIn the current version of the feature the result is a simple HTML report\nthat outlines the issues with the Review App as identified by the scanner,\nwhich uses the [W2CAGAA standards](https://www.w3.org/TR/WCAG20/). We\nrecognize that this is just a small step in making software more accessible\nbut we are excited about the future.\n\n\nFuture iterations and improvements in GitLab we will be working on for a11y\ntesting include:\n\n* [Identify and scan only changed\npages](https://gitlab.com/gitlab-org/gitlab/issues/207383)\n\n* [Display newly introduced issues in the Merge Request\nview](https://gitlab.com/gitlab-org/gitlab/issues/39425)\n\n* [Display the full a11y report in\nGitLab](https://gitlab.com/gitlab-org/gitlab/issues/36170)\n\n\nThis is not a comprehensive list by any means and you can send suggestions\nfor improvements or follow along as the category matures on the\nAccessibility Testing direction page.\n\n\nPhoto by [Matt Noble](https://unsplash.com/@mcnoble) on\n[Unsplash](https://unsplash.com/)\n\n{: .note}\n",[9,695],{"slug":3701,"featured":6,"template":700},"introducing-accessibility-testing-in-gitlab","content:en-us:blog:introducing-accessibility-testing-in-gitlab.yml","Introducing Accessibility Testing In 
Gitlab","en-us/blog/introducing-accessibility-testing-in-gitlab.yml","en-us/blog/introducing-accessibility-testing-in-gitlab",{"_path":3707,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3708,"content":3714,"config":3719,"_id":3721,"_type":14,"title":3722,"_source":16,"_file":3723,"_stem":3724,"_extension":19},"/en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate",{"title":3709,"description":3710,"ogTitle":3709,"ogDescription":3710,"noIndex":6,"ogImage":3711,"ogUrl":3712,"ogSiteName":685,"ogType":686,"canonicalUrls":3712,"schema":3713},"How autoscaling GitLab CI works on AWS Fargate","Run your CI jobs as AWS Fargate tasks with GitLab Runner and the Fargate Driver","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681285/Blog/Hero%20Images/runner-autoscale-fargate-blog-cover.jpg","https://about.gitlab.com/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How autoscaling GitLab CI works on AWS Fargate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2020-05-11\",\n      }",{"title":3709,"description":3710,"authors":3715,"heroImage":3711,"date":3716,"body":3717,"category":718,"tags":3718},[1465],"2020-05-11","\n\nAutoscaling GitLab Runner is a unique value proposition for teams that run their self-managed build agents on cloud-hosted virtual machines. As the number of [CI/CD jobs](/topics/ci-cd/) run over a specific period can fluctuate, teams must have build agent auto-scaling solutions in place that are easy to set up, configure, and cost-efficient.  \n\nGitLab Runner [autoscaling](https://docs.gitlab.com/runner/configuration/autoscale.html) responds to demand by provisioning new cloud-hosted virtual machines with Docker and GitLab Runner. 
When demand is lower, any additional virtual machines above the configured minimum size are de-provisioned. However, while this model of automatically provisioning and terminating virtual machine instances continues to be useful for a vast plethora of use cases, customers also want to take advantage of the capabilities of cloud container orchestration solutions for executing GitLab CI/CD jobs. For some, adopting GitLab's Kubernetes integration for AWS Elastic Kubernetes Service and Google Kubernetes Engine has allowed them to take advantage of the benefits of containerized pipelines. For others, AWS Fargate has proven to be a compelling container orchestration solution, as it simplifies the process of launching and managing containers on AWS services ECS and EKS.\n\nWe are pleased to announce that as of the [12.10](/releases/2020/04/22/gitlab-12-10-released/) release, you can now auto-scale GitLab CI jobs on AWS Fargate managed containers.\n\n![](https://about.gitlab.com/images/blogimages/autoscaling-runners-ci-ecs-fargate.png)\n\n## So how does it work? \n\nIn GitLab 12.1, we released the GitLab Runner [Custom executor](https://docs.gitlab.com/runner/executors/custom.html). With the custom executor, you can create drivers for GitLab Runner to execute a job on technology or a platform that is not supported natively. To enable executing GitLab CI jobs on AWS Fargate, we developed a [GitLab AWS Fargate driver](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate) for the Custom executor.  This driver uses the AWS Fargate `run-task` action to schedule a new task. A task in ECS is an instance of a task definition that runs the container or containers defined within the task definition. 
In this containerized solution for CI builds, the pipeline job executes on a container built from an image that must include the tools that you need to build your application.\n\nThe AWS Fargate Driver works in conjunction with GitLab Runner, a lightweight executable that executes pipeline jobs. Similar to the GitLab Runner executable, a `config.toml` file is the file used to pass configuration parameters to the driver. The AWS Fargate driver divides the CI job into the following stages.\n\n1. Config\n1. Prepare\n1. Run\n1. Cleanup\n\n## SSH connectivity\n\nFor the Fargate Driver to execute build commands in the container that is running as a task on ECS, the driver needs to be able to SSH into the container. So we have built additional capabilities into the driver to allow for a SSH connection between the GitLab Runner + AWS Fargate driver and the CI build container. \n\n![Fargate Driver SSH Connectivity](https://about.gitlab.com/images/blogimages/runner_fargate_driver_ssh.png)\n\n## Limitations\n\nAWS Fargate does not support running containers in privileged mode. For example, Docker-in-Docker (DinD), which enables the building and running of container images inside of containers, does not work on Fargate. In keeping with one of GitLab's core values, iteration, we will continue to iterate on solutions for this problem. So stay tuned for future enhancements.\n\n## Getting Started\n\nTo get started, review our detailed [configuration and setup guide.](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html)\n\nWith the release of the GitLab Runner AWS Fargate driver, we provide the most diverse set of options in the industry for executing CI pipeline jobs in an autoscaling configuration. These options now include cloud-delivered virtual machines, AWS EC2, Google GCP, Azure Compute, and container orchestration platforms: AWS EKS, AWS ECS + Fargate, and Google Kubernetes. 
Our long term goal is to provide the best and most comprehensive solution for executing CI jobs at scale on the major cloud platforms.\n\n\nCover image by [Alessio Lin](https://unsplash.com/@lin_alessio) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,695,828],{"slug":3720,"featured":6,"template":700},"introducing-autoscaling-gitlab-runners-on-aws-fargate","content:en-us:blog:introducing-autoscaling-gitlab-runners-on-aws-fargate.yml","Introducing Autoscaling Gitlab Runners On Aws Fargate","en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate.yml","en-us/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate",{"_path":3726,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3727,"content":3733,"config":3738,"_id":3740,"_type":14,"title":3741,"_source":16,"_file":3742,"_stem":3743,"_extension":19},"/en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation",{"title":3728,"description":3729,"ogTitle":3728,"ogDescription":3729,"noIndex":6,"ogImage":3730,"ogUrl":3731,"ogSiteName":685,"ogType":686,"canonicalUrls":3731,"schema":3732},"Introducing CI/CD Steps, a programming language for DevSecOps automation","Inside GitLab’s vision for CI/CD programmability and a look at how we simplified workflow automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665151/Blog/Hero%20Images/blog-image-template-1800x945__27_.png","https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing CI/CD Steps, a programming language for DevSecOps automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2024-08-06\",\n      }",{"title":3728,"description":3729,"authors":3734,"heroImage":3730,"date":3735,"body":3736,"category":693,"tags":3737},[1465],"2024-08-06","For years, the 
DevOps industry has tried to simplify how developers create automation scripts or workflows to automatically test a code change and to perform a task with the resulting artifact or binary. Today, we are introducing [CI/CD Steps](https://docs.gitlab.com/ee/ci/steps/), a programming language for DevSecOps automation in experiment phase, as a solution to this challenge. With CI/CD Steps, software development teams can easily create complex automation workflows within GitLab.\n\n## The path to CI/CD Steps\n\nEarly in the company's history, GitLab founders and engineers decided that there must be a tight integration between source code management, the place you store your code, and continuous integration, the automation workflows that test your code changes. And we've continued to evolve that integration, focusing on workflow automation tasks and differentiating from the approaches of CI engines across the industry, including Jenkins CI's domain-specific language, GitHub Actions, and many more. \n\nAnd, yes, I did mean to use the term workflow automation tasks rather than [CI and continuous deployment (CD)](https://about.gitlab.com/topics/ci-cd/). This is simply a result of the code that I have seen our customers develop. In a lot of cases, the platform engineering teams that support development teams using GitLab are writing complex automation scripts (workflows). So we need to embrace a more expansive construct beyond simply CI and CD. In fact, I have seen some developers rave about the flexibility of new CI/CD solutions that allow for modularity and conditionals in writing automation workflows.\n\nAt GitLab, our initial approach for CI authoring was based on YAML. 
We can endlessly debate the pros and cons of such a choice, but for me, as a [DevOps](https://about.gitlab.com/topics/devops/) practitioner coming from a large Fortune 50 company with a moshpit of Jenkins Groovy code and hundreds of permutations of scripts basically performing the same job, the GitLab CI authoring and execution approach was a breath of fresh air. \n\nThe first time I read a GitLab CI file – this was back in mid-2019 – my first thought was, \"No, it could not be that simple.\" A non-developer can easily grasp the intent of a basic GitLab CI pipeline without prior knowledge of all of the intricacies of the syntax of the execution model. In fact, I had just spent a year working on a team that spent several hours each day helping other development teams debug Jenkins pipelines written in Groovy and trying to figure out how to test, and in some cases build, large Java monoliths; in other cases, tons of microservices.\n\nWhile there are benefits to a GitLab CI YAML-based authoring and a bash script execution type approach, there are also limitations. Limitations that developers or platform engineers bump into as they integrate more complex workflows into their CI pipelines. These issues seem to be amplified at enterprise scale as platform teams are trying to simplify or standardize workflows across multiple development teams. In fact, one of the quotes from a recent customer survey states: “GitLab needs to embrace a post-YAML world for CI.”\n\nSo, over the past two years, our pipeline authoring team, led by Product Manager [Dov Hershkovitch](https://gitlab.com/dhershkovitch), has been working extensively on improving the pipeline authoring experience. They've also been improving the management experience of the building blocks for workflow automation – especially at scale. 
In fact, a part of this work, the [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/), recently became generally available.\n\nThe logical next step was to build a new language for workflow automation.\n\n## Understanding CI/CD Steps\n\nGitLab CI/CD Steps is a concept incubated by our top-notch engineers. In [our documentation](https://docs.gitlab.com/ee/ci/steps/), we describe CI/CD Steps as reusable and composable pieces of a CI job that can be referenced in a GitLab CI pipeline configuration. But what does that really mean and what is the long-term value proposition?\n\nAs I was giving this some thought, a comment from one of our customers (paraphrased here) came to mind:\n\n“CI/CD Steps enables you to compose inputs and outputs for a CI/CD job. With CI/CD Steps, developers can define inputs and outputs and, therefore, use CI/CD Steps as a function as we do in any modern programming language. A key differentiator to a normal CI/CD component is that CI/CD Steps allows the use of the outputs of other steps without GitLab having to know certain values before running the pipeline. With CI/CD Steps, you could more easily auto-cancel redundant jobs when all jobs are running as part of the parent pipeline versus having to use child pipelines.”\n\nHaving CI/CD Steps alongside the current GitLab CI/CD execution mechanism and the [CI/CD component catalog](https://docs.gitlab.com/ee/ci/components/index.html) unlocks so many possibilities for creating and maintaining the most complex CI/CD workflows. \n\nA key feature is reusability. Now, I am not suggesting that once we release CI/CD Steps as generally available, you would immediately start refactoring your currently working CI/CD jobs to CI/CD Steps. 
Instead, you likely will find opportunities to introduce CI/CD Steps to optimize complex pipeline workflows, and, in doing so, you will begin to reuse a CI/CD Step that you author in multiple pipelines.\n\nCI/CD Steps is a marathon, not a sprint. When we release this in beta (currently targeted for late 2024) and start getting feedback from you, we will learn new information that will guide the evolution of this new CI programming language as well as the new Step Runner, which is designed specifically to run CI/CD Steps alongside the current CI/CD jobs.\n\nI'm sure there will be questions about our strategy: Why did we make certain syntax choices? Why didn't we use Starlark as the basis for this new approach? Why did we create something new that we all have to learn? My boilerplate response is: At GitLab we develop our software in the open. More importantly, as a customer, user, and community member, if you have an idea of how to make it better, we invite you to create a merge request so we can improve this feature together.\n\nWe are the only enterprise software platform where, as users and customers, **you** have a direct say in how the platform evolves and **you** can see the changes happening transparently and in real time. That’s the power of GitLab – we iterate and we collaborate. You have invested in a platform and community that is able to evolve with the ever-changing software industry.\n\n## Create your own CI/CD step\n\nTo get a deeper understanding of CI Steps and our direction, take a look at the detailed refactoring proof-of-concept writeup in [this issue](https://gitlab.com/gitlab-org/step-runner/-/issues/85). [Principal engineer Joe Burnett](https://gitlab.com/josephburnett) walks through in great detail the thought process for refactoring a CI/CD job used as part of our GitLab Runner automated test framework. 
There are also recommendations noted at the end that will inform the evolution of the CI Steps syntax.\n\nThen check out the [CI/CD Steps tutorial](https://docs.gitlab.com/ee/tutorials/setup_steps/) and try creating your own CI/CD step. We recently released the `run` keyword, so testing out a CI/CD step will be simpler than previous examples that required using environment variables. This feature set is experimental so please share your experiences on the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/460057). There also is a separate feedback issue if you are testing the [Run GitHub Actions with CI/CD Steps experimental feature](https://docs.gitlab.com/ee/ci/steps/#actions).\n\nWe look forward to working with you on this journey to continuously improve the GitLab CI/CD authoring experience.\n\n## Read more\n- [CI/CD Catalog goes GA](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n- [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n- [What is CI/CD?](https://about.gitlab.com/topics/ci-cd/)\n- [The basics of CI](https://about.gitlab.com/blog/basics-of-gitlab-ci-updated/)\n",[495,9,786,785,695],{"slug":3739,"featured":91,"template":700},"introducing-ci-cd-steps-a-programming-language-for-devsecops-automation","content:en-us:blog:introducing-ci-cd-steps-a-programming-language-for-devsecops-automation.yml","Introducing Ci Cd Steps A Programming Language For Devsecops 
Automation","en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation.yml","en-us/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation",{"_path":3745,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3746,"content":3752,"config":3757,"_id":3759,"_type":14,"title":3760,"_source":16,"_file":3761,"_stem":3762,"_extension":19},"/en-us/blog/introducing-ci-components",{"title":3747,"description":3748,"ogTitle":3747,"ogDescription":3748,"noIndex":6,"ogImage":3749,"ogUrl":3750,"ogSiteName":685,"ogType":686,"canonicalUrls":3750,"schema":3751},"Introducing CI/CD components and how to use them in GitLab","Learn the main benefits for using CI/CD components in your CI/CD pipelines and how to achieve them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667676/Blog/Hero%20Images/buildingblocks.jpg","https://about.gitlab.com/blog/introducing-ci-components","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing CI/CD components and how to use them in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-07-10\",\n      }",{"title":3747,"description":3748,"authors":3753,"heroImage":3749,"date":3754,"body":3755,"category":718,"tags":3756},[1567],"2023-07-10","Welcome to the third blog in our series on GitLab's CI/CD components! If you\nhaven't already, we encourage you to read \"[How to build reusable CI/CD\ntemplates](https://about.gitlab.com/blog/how-to-build-reusable-ci-templates/)\"\nand \"[Use inputs in includable\nfiles](https://about.gitlab.com/blog/use-inputs-in-includable-files/)\"\nto gain a comprehensive understanding of these exciting new capabilities. In\nthis blog post, we'll dive in and explore the power of GitLab's CI/CD\ncomponents in revolutionizing CI/CD workflows. 
We'll also provide a glimpse\ninto the future of GitLab's CI/CD ecosystem, including the upcoming release\nof the [CI/CD\ncatalog](https://docs.gitlab.com/ee/architecture/blueprints/ci_pipeline_components/),\na framework containing a collection of these components. With these moves,\nGitLab is taking a significant step towards streamlining pipeline\nconfigurations and enhancing reusability.\n\n\n### CI/CD components\n\nIn [GitLab\n16.1](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/),\nan exciting experimental feature called CI/CD components was introduced.\nCI/CD components are reusable, single-purpose building blocks that abstract\naway pipeline configuration units.\n\n\nBy leveraging the power of CI/CD components, users can unlock several key\nbenefits:\n\n1. **Reusability and abstraction.** CI/CD components allow pipelines to be\nassembled using abstractions instead of defining all the details in one\nplace. With components encapsulating implementation details, developers can\nfocus on composing pipelines using pre-built, reusable blocks. This approach\npromotes modularity, code reusability, and simplifies pipeline maintenance.\n\n2. **Flexibility with input.** Components support input parameters, enabling\ncustomization based on pipeline contexts, making them adaptable and reusable\nacross various pipeline stages. Developers gain the ability to build a\ndynamic CI/CD catalog that is tagged and versioned, providing better control\nand compatibility. Developers can reference specific component versions,\nensuring stability and reproducibility. By leveraging version tags, teams\ncan maintain consistency in their pipelines while easily upgrading to newer\nversions when desired.\n\n4. **High-quality standards through testing.** Testing components as part of\nthe development workflow to ensure quality maintains high standards is\nstrongly recommended. 
By incorporating testing into the CI/CD process,\ndevelopers can verify the reliability and functionality of components,\nidentify and fix issues early on, and deliver more robust and dependable\npipelines.\n\n5. **The CI/CD catalog.** A centralized repository of components, the CI/CD\ncatalog is set to be released soon, and will act as a treasure trove of\ncomponents curated to cover a wide range of use cases. This centralized\nrepository offers developers a one-stop shop for discovering, integrating,\nand sharing components. Teams can benefit from a growing catalog of\npre-built, quality-tested components, saving time and effort in configuring\ntheir pipelines.\n\n\nIn the previous blog posts, we discussed the main benefits for the first two\npoints (which are also available with CI/CD templates), but now let's dig\ndeeper into components and how they could revolutionize the way you\nconstruct your pipelines.\n\n\n### Testing a CI/CD component\n\nAs software development continues to evolve, ensuring the reliability and\nquality of code components becomes increasingly vital.\n\n\nOne of the main benefits of using components is the ability to thoroughly\ntest components before software is officially released, enabling a more\nrobust and streamlined development process. In our context, a released\ncomponent is versioned and will follow a structured syntax, allowing for\nseamless integration within pipelines. \n\n\n```yaml\n\ninclude:\n  - components: /path/to/project@\u003Cversion> \n```\n\nOne of the unique benefits of our CI/CD components is the flexibility they\noffer. DevSecOps teams can opt in for an \"unofficial\" release by appending\n`@commit_SHA`, allowing them to experiment and iterate on their code before\nmaking it an official release.\n\n\n```yaml\n\ninclude:\n  - components: /path/to/project@\u003Ccommit_SHA> \n```\n\nTo make a component an official release, users must tag it, essentially\ncreating a versioned snapshot. 
The tagged release will then be made\navailable in our comprehensive CI/CD catalog (launching soon), providing\nusers with easy access to a range of thoroughly tested and approved\ncomponents. To ensure the stability and reliability of your CI/CD\ncomponents, it is crucial to thoroughly test them. DevSecOps teams can\nleverage the power of our pipeline by utilizing the commit_SHA identifier to\nrun comprehensive tests. If the pipeline successfully passes all tests, they\ncan proceed to tag the component, signifying its readiness for release.\n\n\nBy configuring a release job based on the tagged version, DevSecOps teams\ncan confidently incorporate the official component into their projects,\nknowing that it has undergone testing and validation. To learn more about\nhow to test components, you can check out our\n[documentation](https://docs.gitlab.com/ee/ci/components/#test-a-component)\nor watch this walkthrough video:\n\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe width=\"1870\" height=\"937\"\nsrc=\"https://www.youtube.com/embed/Vw8-ce8LNBs\" title=\"\" frameborder=\"0\"\nallow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;\npicture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\u003C/figure>\n\n\n### Versioning and tagging\n\nAs mentioned in the previous section, DevSecOps teams can leverage the\n`@version` or the `@commit_SHA` to refer to a component in their pipeline.\nAnother option to refer to a component is by leveraging the `@latest`.\n\n\n```yaml\n\ninclude:\n  - components: /path/to/project@latest\n```\n\nThis will use the latest official (tagged) available components. When used\nin a pipeline in combination with reliable tests, you can guarantee that\nyour components used in a pipeline will always be tested and verified.\n\n\n### On the horizon: CI/CD catalog\n\nOne of the biggest benefits of using components is yet to be seen and will\nbe available with the launch of our CI/CD catalog. 
The catalog will allow\nusers to search, find, and understand how to use components that are\navailable across their organization, setting a framework for them to\ncollaborate on pipeline constructs so that they can be evolved and improved\nover time. Stay tuned!\n\n\n### Dogfooding components \n\nAt GitLab, we believe in [dogfooding our own\nproduct](https://handbook.gitlab.com/handbook/values/#dogfooding). To\ndemonstrate the power and practicality of CI/CD components, we have\nconverted some of our GitLab templates into components and asked our\ninternal team to use them and provide additional feedback. By doing so, we\nare actively using and testing components in real-world scenarios,\nuncovering insights, and continuously improving their functionality. In this\n[group](https://gitlab.com/gitlab-components), we’ve converted Code Quality,\nContainer Scanning and SAST templates into CI/CD components and asked\ninternal teams to use them.\n\n\nThrough this dogfooding process, we are not only validating the\neffectiveness of CI/CD components but also gaining invaluable experience and\nfeedback to refine and enhance our offering. It's a testament to our\ncommitment to providing practical and reliable solutions for our users. You\ncan view the ongoing discussions between the internal teams in this\n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/390656).\n\n\n### Call for action\n\nThe CI/CD component catalog is currently in an experimental phase, so we\nadvise against using it in a production environment at this time. There is a\nhigh probability of changes being made to it. We are currently working on\nreorganizing the folder structure of the components to prepare for the\nlaunch of the CI/CD catalog. 
You can stay updated on our progress by\nfollowing our [epic](https://gitlab.com/groups/gitlab-org/-/epics/10728), or\nlet us know what you think in this dedicated [feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n\n### What's next\n\nGitLab's CI/CD component catalog and its accompanying CI/CD components\nfeature are ushering in a new era of streamlined pipeline configurations. By\nembracing reusability, abstraction, input flexibility, versioning, and a\ncentralized catalog, developers can build efficient, adaptable, and\nmaintainable CI/CD workflows. The CI/CD component catalog empowers teams to\naccelerate their software delivery, collaborate effectively, and leverage\nthe full potential of GitLab's CI/CD capabilities.\n\n\nStay tuned for the launch of the CI/CD catalog, where you'll gain access to\nan extensive collection of components, unlocking new possibilities for your\npipelines. GitLab remains committed to empowering developers with\ncutting-edge tools, driving innovation, and simplifying the complexities of\nmodern software development.\n\n\n> Learn more about the CI/CD Catalog and components:\n\n>  \n\n> - [CI/CD Catalog goes GA: No more building pipelines from\nscratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n\n> \n\n> - [A CI/CD component builder's\njourney](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n\n>\n\n> - [FAQ: GitLab CI/CD\nCatalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n\n>\n\n> - [Documentation: CI/CD components and CI/CD\nCatalog](https://docs.gitlab.com/ee/ci/components/)\n\n> \n\n\nCover image by [Alexander\nGrey](https://www.pexels.com/photo/assorted-color-bricks-1148496/) on\n[Pexels](https://www.pexels.com).\n\n{: .note}\n",[917,9,695],{"slug":3758,"featured":6,"template":700},"introducing-ci-components","content:en-us:blog:introducing-ci-components.yml","Introducing Ci 
Components","en-us/blog/introducing-ci-components.yml","en-us/blog/introducing-ci-components",{"_path":3764,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3765,"content":3771,"config":3776,"_id":3778,"_type":14,"title":3779,"_source":16,"_file":3780,"_stem":3781,"_extension":19},"/en-us/blog/introducing-resource-groups",{"title":3766,"description":3767,"ogTitle":3766,"ogDescription":3767,"noIndex":6,"ogImage":3768,"ogUrl":3769,"ogSiteName":685,"ogType":686,"canonicalUrls":3769,"schema":3770},"Introducing: Resource groups","How we’re improving deployments by limiting pipeline concurrency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679102/Blog/Hero%20Images/resource-groups.jpg","https://about.gitlab.com/blog/introducing-resource-groups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing: Resource groups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":3766,"description":3767,"authors":3772,"heroImage":3768,"date":3773,"body":3774,"category":1040,"tags":3775},[715],"2020-01-21","\nGitLab CI/CD pipelines build, test, deploy your code as part of a single workflow integrated across all [stages of the DevOps lifecycle](/topics/devops/). Ultimately, we want to enable teams to deploy better software faster to their customers, and we do that by continually [iterating](https://handbook.gitlab.com/handbook/values/#iteration) on new and existing features to improve the GitLab experience.\n\nContinuous delivery is all about making sure that [CI-validated code](/solutions/continuous-integration/) goes through a structured deployment pipeline. 
While GitLab CI continues to be [a top-rated solution in continuous integration](/analysts/forrester-cloudci19/), we want our continuous delivery capabilities to be just as loved and feedback from the GitLab community plays a big role in how we improve the user experience.\n\nAt GitLab, everything we do is [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default). This allows us to collaborate and share ideas, documentation, examples, and processes with the whole community. The original idea of limiting pipeline concurrency using resource groups was introduced by [@inem](https://gitlab.com/inem) in [a public issue](https://gitlab.com/gitlab-org/gitlab/issues/15536) and the response was certainly enthusiastic.\n\n![Resource groups response](https://about.gitlab.com/images/blogimages/resource-groups-1.png){: .shadow.small.center}\n\nFor some users, they found that running multiple pipelines and/or jobs at the same time in an environment would lead to errors. Some pipelines and/or jobs use unique resources, and concurrent deployments meant that multiple users were affecting the environment with some unintended consequences.\n\n### Example:\n\nLet's say your team is developing a mobile app and you deploy it for testing purposes to a physical smartphone on a Friday afternoon. Maybe you're a startup and only have one or two phones for this purpose. You may need to clear the cache and delete the app before downloading it again so you can start the test clean. But what if in the middle of your test, someone else decides to clear the data on that device? Situations like this would inevitably cause errors, leaving teams with little choice but to coordinate these deployments amongst themselves.\n\nWe’re always working hard to enable [speedy, reliable pipelines](/direction/ops/#speedy-reliable-pipelines). 
Coming to GitLab 12.7, available tomorrow, we’re introducing the `resource_group` attribute to projects so that only one job can deploy to a specific resource group at any given time. This will improve deployment flows, especially when deploying to physical environments.\n\nIf we go back to the mobile phone example, the phone would be its own `resource_group` and will only have one deployment at a time. If another deployment were to try and run on this device, the job will be queued until the first job is finished with the message “waiting for resource.”\n\n![waiting on resource](https://about.gitlab.com/images/blogimages/resource-groups-2.png){: .shadow.medium.center}\n\nTeams can define multiple `resource_group`(s) for their environment in `.gitlab-ci.yml`. Even if running separate pipelines, as long as a `resource_group` is assigned then the jobs will not run concurrently. Tools like [Terraform](https://www.terraform.io/docs/internals/graph.html) similarly help users manage concurrencies by limiting resources.\n\nAs we continue to improve and iterate on our [product vision for continuous delivery](/direction/ops/), we’ll be looking to make future improvements to resource groups and deployment environments. Some of our plans include implicit environment locking, [only allowing forward incremental deployments](https://gitlab.com/gitlab-org/gitlab/issues/25276), and the flexibility to define concurrency values (the default of 1 can’t be configured in this release).\n\nPlease join us in our [public epic](https://gitlab.com/groups/gitlab-org/-/epics/1294) where we discuss continuous delivery and feel free to give feedback or suggestions on ways we can improve deployments. 
Everyone can contribute.\n\nCover image by [mostafa meraji](https://unsplash.com/@mostafa_meraji?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/turnstile?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,875],{"slug":3777,"featured":6,"template":700},"introducing-resource-groups","content:en-us:blog:introducing-resource-groups.yml","Introducing Resource Groups","en-us/blog/introducing-resource-groups.yml","en-us/blog/introducing-resource-groups",{"_path":3783,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3784,"content":3790,"config":3795,"_id":3797,"_type":14,"title":3798,"_source":16,"_file":3799,"_stem":3800,"_extension":19},"/en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"title":3785,"description":3786,"ogTitle":3785,"ogDescription":3786,"noIndex":6,"ogImage":3787,"ogUrl":3788,"ogSiteName":685,"ogType":686,"canonicalUrls":3788,"schema":3789},"Introducing the GitLab CI/CD Catalog Beta","Discover, reuse, and contribute CI/CD components effortlessly, enhancing collaboration and efficiency when creating pipeline configurations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099399/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_4UHVIJlePT8rEzjvYkGYvi_1750099398604.jpg","https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab CI/CD Catalog Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-12-21\",\n      }",{"title":3785,"description":3786,"authors":3791,"heroImage":3787,"date":3792,"body":3793,"category":718,"tags":3794},[1835],"2023-12-21","DevSecOps is all about speed – achieving rapid progress in software development. 
To succeed in DevSecOps, organizations require a well-functioning CI/CD pipeline that teams can utilize to automate their development workflows.\n\nHowever, crafting pipeline configurations with YAML can be intricate and challenging because YAML isn't a programming language. Developers may find themselves reinventing the wheel each time they try to create new configurations because they don't have visibility into existing configurations and work that others may have already done, resulting in inefficiency.\n\n[GitLab 16.7](https://about.gitlab.com/releases/2023/12/21/gitlab-16-7-released/) introduces the [CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/#cicd-catalog) (Beta), with the goal of enhancing developer efficiency by addressing three main questions developers encounter when creating pipeline configurations:\n\n* Discoverability: Has someone already created a configuration for my task, and where can I find it?\n* Reusability: Once I find a suitable pipeline, how do I use it effectively?\n* Ease of contribution: I've created a useful configuration; how can I easily share it with the GitLab community?\n\n## What is the GitLab CI/CD Catalog?\n\nThe CI/CD Catalog serves as a centralized hub for developers and organizations to share pre-existing [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and to discover reusable configurations that others may have already developed. Every component published by users will be part of a public catalog accessible to all users, regardless of their organization or project. \n\nThis approach promotes cross-organization collaboration, allowing the entire GitLab community to benefit from the wealth of CI components available. 
It's a powerful step forward in sharing knowledge among GitLab users, enabling developers to harness the collective expertise of the platform.\n\n## Easy component creation and publishing\n\nIn addition to reusing components, developers can contribute to the GitLab CI/CD community by creating their own components and publishing them in the catalog. This ensures that others can benefit from their expertise and encourages collaboration across the platform.\n\n## How to discover and use components\n\n**1. Opening the CI/CD Component Catalog**\n\nClick on “Search or go to...”\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099406962.png)\n\nOpen the catalog by navigating to “Explore > CI/CD Catalog” or visit this [catalog page](https://gitlab.com/explore/catalog).\n\nUpon accessing the catalog, you'll find a list of CI/CD components projects contributed by your team, organization, or the wider GitLab community.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099406963.png)\n\n**2. Browsing components**\n\nNavigate through the list of components in the CI/CD Catalog or use the Search bar to find components related to a specific topic.\n\nEach component project contains one or multiple components. Opening a component project will display its documentation, providing details on all available components. This includes insights into how to use each component and understanding the expected input parameters.\n\n**3. Include the selected components in your .gitlab-ci.yml**\n\nNow that you've explored the catalog and selected the desired CI/CD components, integrate them into your project's CI/CD pipeline.\n\nFollow these steps to update your .gitlab-ci.yml file:\n\n1. Open the .gitlab-ci.yml file in your project for editing.\n2. 
Use the include keyword to add the selected components to your CI configuration. \n3. Ensure that the paths to the component YAML files are correct and specify the appropriate version for each component.\n4. In case the components have input parameters, review the component’s documentation to understand which inputs are required, and add them to your CI configuration.\n5. Save and commit your changes to the .gitlab-ci.yml file.\n\nHere is an example of YAML code that demonstrates how to include a few components and use them with input parameters.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.15.48_PM_aHR0cHM6_1750099406965.png)\n\n## How to create and publish components\n\nHave you crafted a valuable configuration that you'd like to share and contribute to your team or the GitLab community? Here are the six steps to make it happen:\n\n**Step 1: Create a new project and set it as a component project**\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. On the left sidebar, select Settings > General.\n3. Expand Visibility, project features, permissions.\n4. Scroll down to CI/CD Catalog resource and select the toggle to set the project as a CI/CD Catalog resource.\n5. Ensure that your project description is filled out; this information will be showcased in the catalog, providing users with insights into the purpose and functionality of your components.\n6. Create a .gitlab-ci.yml file in the root of the repository. You will need this file to test and release the components as described in steps 4 and 5 below. Note: This step only needs to be done once for any project that contains components.\n\n**Step 2: Create the components**\n\n1. Create a /templates folder in the root directory of the project.\n2. In this templates directory, create one YAML template file (ending in .yml) for each component.   \n3. 
The template can optionally include a description of input arguments using the `spec` keyword if the component requires input parameters, and the definition of jobs, that may include references to values using the interpolation format $[[ inputs.input-name ]]. Ensure you use three dash lines between the spec header, and job definitions.\n\nHere is an example of a `deploy.yml` template that gets input parameters:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_11.34.20_AM_aHR0cHM6_1750099406966.png)\n\nIn this template, we've defined two input parameters, `stage` and `environment`, both with default values. In the content section, a job is defined that interpolates these input arguments.\n\n**Step 3: Create components documentation** \n\nCreate a README.md file in the root of the project, including information about the components. Explain the component's functionality, detail input parameters, and provide illustrative examples. This ensures clarity for component consumers on how to use them.\n\nThis is an example of component documentation:\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099406967.png)\n\nAdditional information can be found in our [CI/CD components](https://docs.gitlab.com/ee/ci/components/index.html#components-repository) documentation. \n\n**Step 4: Add tests to the components (recommended)**\n\nDeveloping a component follows a standard software development cycle with stages like build, test, and deploy. It's highly recommended to test your components before publishing them. Check out this example test, which queries the GitLab REST API to check whether a component job has been added to the pipeline. 
Feel free to use it, and consider adding more tests to ensure your components work as expected.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.32.53_PM_aHR0cHM6_1750099406968.png)\n\nInclude all your test jobs in the **.gitlab-ci.yml** file in your Catalog project.\n\n**Step 5: Prepare your CI/CD configuration for publishing**\n\n1. Create a release job in the **.gitlab-ci.yml** file in the component project using the `Release` keyword.  See the job example:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.34.27_PM_aHR0cHM6_1750099406969.png)\n\n__Note:__ Do not \"create release\" from GitLab UI since this soon won't be supported for a Component Catalog.\n\n2. We recommend adding this rule in the Release job; this will automatically trigger the Release job only when a git tag that starts with digits is created in the project, following semantic release conventions (1.0.0 for example).\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.21.30_PM_aHR0cHM6_1750099406970.png)\n\n3. So this is how we recommend your job to look: \n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.37.09_PM_aHR0cHM6_1750099406970.png)\n\n4. To manually release components, add a manual rule as below, so when the pipeline is triggered, someone will need to manually run the release job. 
\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.38.18_PM_aHR0cHM6_1750099406971.png)\n\nHere is the release job with the `when:manual` rule:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.41.00_PM_aHR0cHM6_1750099406972.png)\n\n**Step 6: Publish your components**\n\nOnce you are satisfied with your components, and all tests have passed successfully, it's time to publish a new version by creating a git tag, so they will be available in the CI/CD Catalog.\n\n1. Create a Git tag using the semantic versioning format \"MAJOR.MINOR.PATCH\". \n\n2. You can create tags through the UI by navigating to Code -> Tags -> New Tag, or via the CLI using `git tag`. \n\n3. Creating the tag will trigger a pipeline that runs the Release job if all tests pass successfully. The component project will then be assigned the version you defined in the tag, and it will appear in the catalog.\n\n### Example projects\n\n* [GitLab official components](https://gitlab.com/components)\n\n### Documentation \n\nFor more details on using components from the CI/CD Catalog and maximizing their potential within your projects, refer to the official [CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#cicd-catalog). 
This documentation provides in-depth insights into the functionality.\n\n> [Take a tour](https://gitlab.navattic.com/cicd-catalog) of the GitLab CI/CD Catalog.\n\n_A special thank you to [Dov Hershkovitch](https://about.gitlab.com/company/team/#dhershkovitch) and [Fabio Pitino](https://gitlab.com/fabiopitino) for their invaluable content reviews and contributions to this blog post._",[9,1062,721,917],{"slug":3796,"featured":6,"template":700},"introducing-the-gitlab-ci-cd-catalog-beta","content:en-us:blog:introducing-the-gitlab-ci-cd-catalog-beta.yml","Introducing The Gitlab Ci Cd Catalog Beta","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta.yml","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"_path":3802,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3803,"content":3809,"config":3817,"_id":3819,"_type":14,"title":3820,"_source":16,"_file":3821,"_stem":3822,"_extension":19},"/en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"title":3804,"description":3805,"ogTitle":3804,"ogDescription":3805,"noIndex":6,"ogImage":3806,"ogUrl":3807,"ogSiteName":685,"ogType":686,"canonicalUrls":3807,"schema":3808},"OIDC simplifies GitLab CI/CD authentication with Google Cloud","OpenID Connect can sometimes be complex, but it's the safer and recommended way to authenticate your GitLab pipeline with Google Cloud. 
This tutorial shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669863/Blog/Hero%20Images/security-pipelines.jpg","https://about.gitlab.com/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hiroki Suezawa\"},{\"@type\":\"Person\",\"name\":\"Dhruv Jain\"}],\n        \"datePublished\": \"2023-06-28\",\n      }",{"title":3810,"description":3805,"authors":3811,"heroImage":3806,"date":3814,"body":3815,"category":697,"tags":3816},"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud",[3812,3813],"Hiroki Suezawa","Dhruv Jain","2023-06-28","In recent years, the [integration of cloud services and GitLab through\nGitOps](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\nhas become very common. Applications are now continuously tested and\ndeployed through [continuous integration and delivery\n(CI/CD)](https://about.gitlab.com/topics/ci-cd/); cloud environments are\nmanaged in code through Infrastructure as Code (IaC) using tools like\nTerraform; and GitLab CI is used as a core tool to perform these GitOps\nprocesses.\n\n\nAt the same time, [software supply chain\nattacks](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/)\nhave increased. 
To reduce the risk of an attack, the use of OpenID Connect\n([OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html#introduction-to-openid-connect))\nauth is recommended, and GitLab 15.7 introduced [ID\ntokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html),\na mechanism for secure OIDC integration.\n\n\nHowever, OIDC integration can be complex for beginners and can be difficult\nto configure properly. Therefore, GitLab's Infrastructure Security Team has\ncreated a Terraform module for configuring Google Cloud and a CI template\nfor GitLab CI so GitLab CI and Google Cloud can be securely integrated.\n\n\nThis tutorial explains how to use [these OIDC\nmodules](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules).\n\n\n## Why OIDC?\n\nThe integration between Google Cloud and GitLab CI has often been done by\nadding a static key of the service account in Google Cloud to the\nenvironment variables of CI. 
However, this method has the following\nproblems:\n\n\n- The risk of compromise is high because the same key can be used to\nmanipulate the cloud environment over time.\n\n- Because static keys are portable, there is no link between the key and the\nenvironment in which it is used, making it difficult to identify where the\nkey is being used.\n\n\nOIDC authentication can solve the above problems by providing the following\nbenefits:\n\n- No need to issue static keys, eliminating the need for long-term key\nmanagement.\n  - It also eliminates the compliance need of rotating the secrets every few months.\n- Low risk of leakage due to temporary tokens issued.\n\n- Because the CI used is tied to the Google Cloud environment, it is\npossible to properly manage where the service account is used.\n\nIn addition, other settings such as CI and CD isolation can be configured\nusing [the claims provided by GitLab\nCI](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html).\n\n\n## OIDC authentication with Google Cloud\n\nThe OIDC integration between Google Cloud and GitLab CI works as follows:\n\n\n- Preparation (areas to configure in Terraform in OIDC models)\n  1. Create a service account in Google Cloud for CI integration and set up the appropriate roles.\n  1. Create a Google Cloud Workload Identity pool and provider, and configure integration with GitLab CI.\n  1. 
Assign the Workload Identity User role to the service account.\n\n\n\n\n![Simplified\ndiagram](https://about.gitlab.com/images/blogimages/2023-06-30-introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci/oidc-auth-diagram.png){:\n.shadow}\n\n\nGitLab CI in action (simplified by the GitLab CI template in OIDC modules)\n\n{: .note .text-center}\n\n\nGoogle Cloud authenticates using an ID token issued on GitLab CI, so there\nis no need to issue a Google Cloud service account key.\n\n\n## How to use a Terraform module\n\nThe process of configuring a Terraform module to establish a connection\nbetween Google Cloud and GitLab using OIDC is fairly simple. This module\ntakes care of the following steps:\n\n1. Create the Google Cloud Workload Identity Pool.\n\n1. Create a Workload Identity Provider.\n\n1. Grant permissions for service account impersonation.\n\n\nNote: Your account must have at least the Workload Identity Pool Admin\npermission on the Google Cloud project.\n\n\n```terraform\n\n# terraform\n\nmodule \"gl_oidc\" {\n source = \"gitlab.com/gitlab-com/gcp-oidc/google\"\n version = \"3.0.0\"\n google_project_id = GOOGLE_PROJECT_ID\n gitlab_project_id = GITLAB_PROJECT_ID\n oidc_service_account = {\n   \"sa\" = {\n     sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n     attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n   }\n }\n}\n\n```\n\n\nThe above sample module can be used to configure OIDC. 
There are some\nadditional parameters that can be used to configure this module further (a\ndetailed list and description of those parameters can be found\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/#configure-gitlab-for-oidc-integration-using-terraform-module)).\n \nBy default, all branches of the project are authenticated to Google Cloud,\nbut you can specify more granular conditions, such as the branch name of the\ncommit that triggered the CI, or authenticating only with a specific tag.\n\n\nFurther settings can be made by changing the following attribute settings in\naccordance with the ID token claim:\n\n\n```\n  oidc_service_account = {\n    \"sa\" = {\n      sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n      attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n    }\n```\n\n\nCode files for this module are available\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/terraform-modules/gcp-oidc).\n\n\n## How to use the CI template\n\n[The CI\ntemplate](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/templates/gcp_auth.yaml)\nmakes GitLab CI very easy for Google Cloud OIDC authentication. This CI\ntemplate supports [Application Default\nCredentials](https://cloud.google.com/docs/authentication/application-default-credentials)\nand can be used from IaC such as Terraform, CLI such as gcloud, and SDKs in\nPython and Go.\n\n\nFor example, if you want to use the CI template for Terraform, you can\nwrite:\n\n\n```\n\n# You should upgrade to the latest version. 
You can find the latest version\nat\nhttps://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/releases\n\ninclude:\n  - remote: 'https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/raw/3.0.0/templates/gcp_auth.yaml'\n\nterraform:\n  image:\n    name: hashicorp/terraform:1.5.3\n    entrypoint:\n      - /usr/bin/env\n      - \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n  extends: .google-oidc:auth\n  variables:\n    WI_POOL_PROVIDER: //iam.googleapis.com/projects/GOOGLE_PROJECT_ID/locations/global/workloadIdentityPools/WORKLOAD_IDENTITY_POOL/providers/WORKLOAD_IDENTITY_POOL_PROVIDER\n    SERVICE_ACCOUNT: SERVICE_ACCOUNT_EMAIL\n  script:\n    - terraform init\n    - terraform plan\n```\n\n\n### Required variables\n\n- WI_POOL_PROVIDER(under .google-oidc:) - Full canonical resource name of\nthe workload identity pool provider. This value must be written under\n.google-oidc: like this.\n\n- SERVICE_ACCOUNT - Service Account email address\n\n\nA detailed list and description of those parameters can be found\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/README.md#using-oidc-in-pipelines).\n\n\nAs a note, you cannot use `before_script` in the job that uses this template\nbecause the way GitLab CI works will result in OIDC code being overwritten.\nCI template uses `before_script` to perform the initial configuration of\nOIDC.\n\n\nCode samples for this module are available\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples/ci/gcp).\n\n\n## Next steps\n\nThis article has introduced OIDC modules for OIDC integration and secure\nauthentication between Google Cloud and GitLab CI. In short, we are doing\nthe following steps:\n\n\n1. Setting up a service account\n\n1. 
Granting permissions to the service account\n\n1. Running the Terraform module\n\n1. Setting up CI pipeline\n\n\nYou can find the relevant sample for the above steps\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples).\n\n\nAlso, GitLab is currently developing a [CI Catalog and CI\nComponents](https://about.gitlab.com/blog/use-inputs-in-includable-files/).\nWe plan to support them.\n\n\nThe GitLab Infrastructure Security Team will continue to improve the modules\nas we receive feedback, and we hope to consider and release components that\nmaintain a high level of security and usability for both internal and\nexternal use. \n\n\n## Read more\n\n- [Configure OIDC with GCP Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud_services/google_cloud/)\n\n- [Workload Identity Federation on Google\nCloud](https://cloud.google.com/iam/docs/workload-identity-federation)\n\n- [Terraform for\ngoogle_iam_workload_identity_pool_provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/iam_workload_identity_pool_provider)\n\n- [OIDC Authentication using ID\ntokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html)\n",[917,9,697,830],{"slug":3818,"featured":6,"template":700},"introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","content:en-us:blog:introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","Introduction Of Oidc Modules For Integration Between Google Cloud And Gitlab 
Ci","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"_path":3824,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3825,"content":3831,"config":3836,"_id":3838,"_type":14,"title":3839,"_source":16,"_file":3840,"_stem":3841,"_extension":19},"/en-us/blog/ios-cicd-with-gitlab",{"title":3826,"description":3827,"ogTitle":3826,"ogDescription":3827,"noIndex":6,"ogImage":3828,"ogUrl":3829,"ogSiteName":685,"ogType":686,"canonicalUrls":3829,"schema":3830},"Tutorial: iOS CI/CD with GitLab","Learn how to create an automated CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669340/Blog/Hero%20Images/john-cameron-DgRb7aAGK4k-unsplash.jpg","https://about.gitlab.com/blog/ios-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: iOS CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-07\",\n      }",{"title":3826,"description":3827,"authors":3832,"heroImage":3828,"date":3833,"body":3834,"category":718,"tags":3835},[913],"2023-06-07","Creating an automated [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for\nan Apple iOS application can be challenging. 
Configuring build environments\nand managing code signing can be very time-consuming and error-prone, and\nwhen you get that all working, you still need a way to send your app to\nApple.\n\n\nGitLab makes this much easier with [GitLab Mobile\nDevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html).\n\n\nGitLab Mobile DevOps is a collection of features built right into GitLab to\nsolve the biggest challenges mobile teams face in establishing a DevOps\npractice.\n\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD\npipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n\n## Prerequisites\n\nTo get started, there are a few prerequisites you’ll need:\n\n\n* An Apple Developer account -\n[https://developer.apple.com/](https://developer.apple.com/)\n\n* Ruby and XCode command line tools installed on your local machine\n[https://docs.fastlane.tools/getting-started/ios/setup](https://docs.fastlane.tools/getting-started/ios/setup/) \n\n\n> Try out our [Android CI/CD with GitLab\ntutorial](/blog/android-cicd-with-gitlab/).\n\n\n## Reference project\n\nFor this walkthrough, we’ll use the iOS demo project for reference:\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo)\n\n\n## Install fastlane\n\nIf you haven’t done so yet, the first step will be to install fastlane. Do\nthis by creating a file in the root of your project called Gemfile. Give it\nthe following contents:\n\n\n```\n\nsource \"https://rubygems.org\"\n\n\ngem \"fastlane\"\n\n```\n\n\nThen, from the terminal in your project, run:\n\n\n```\n\nbundle install\n\n```\n\n\nThis command will install fastlane, and all of its related dependencies.\n\n\n## Initialize fastlane\n\nNow that fastlane is installed, we can set it up for our project. Run the\nfollowing command from the terminal in your project and choose Option No. 
2\nsince we will be targeting Test Flight in this tutorial:\n\n\n```\n\nbundle exec fastlane init\n\n```\n\n\nRunning this command will create a new folder in your project called\n`fastlane`. This folder will contain two files `Appfile` and `Fastfile`. \n\n\n![Initialize\nFastlane](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/fastlane-init.png)\n\n\nThe Appfile contains the configuration information for the app, and the\nFastfile has some sample code that we will replace later. See the fastlane\ndocs for more information about the configuration details in the Appfile\n[https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/)\n\n\n## Initialize fastlane match\n\nThe next step will be to set up fastlane Match, which is the part of\nfastlane that handles code signing. For more information on fastlane match,\nsee the docs\n[https://docs.fastlane.tools/actions/match/](https://docs.fastlane.tools/actions/match/\n)\n\n\nWe’ll start by running the following command from the terminal in your\nproject:\n\n\n```\n\nbundle exec fastlane match init\n\n```\n\n\nThis command will prompt you to choose which storage backend you want to use\n(select gitlab_secure_files) and to input your project path (for example:\ngitlab-org/gitlab). It will then generate a fastlane Matchfile configured to\nuse your project as the storage backend for fastlane Match.\n\n\n![Initialize fastlane\nMatch](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/match-init.png)\n\n\n## Generate a project access token\n\nNext, you'll need a GitLab Access Token to use fastlane Match from your\nlocal machine. To create a project access token, visit the Access Tokens\nsection under Settings in your GitLab project. 
Create a new token with\nmaintainer access to the “api” scope.\n\n\nThen run the following command from the terminal in your project replacing\n“YOUR_NEW_TOKEN” with the access token you just generated:\n\n\n```\n\nexport PRIVATE_TOKEN=YOUR_NEW_TOKEN\n\n```\n\n\nThis will configure fastlane to use this access token when making fastlane\nMatch requests to your project.\n\n\n## Generate signing certificates\n\nNow that fastlane Match is configured, we can use it to generate the signing\ncertificates and provisioning profiles for our app and upload them to\nGitLab.\n\n\nNOTE: If you already have these files for your app, see the instructions in\nthis blog post on how to use fastlane to import your existing code signing\nfiles\n[/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/).\n\n\nRun the following command from the terminal in your project to generate\ndevelopment code signing files and upload them to GitLab.\n\n\n```\n\nbundle exec fastlane match development\n\n```\n\n\nWhen this command completes, go to the CI/CD settings page in your project\nand scroll down to the Secure Files section to see the files that were just\ngenerated and added to your project.\n\n\nWhile we’re here, we can go ahead and do that same thing for the appstore\ncode signing files. Run the following command to generate the appstore code\nsigning files and upload them to GitLab.\n\n\n```\n\nbundle exec fastlane match appstore\n\n```\n\n\n## Update Xcode configuration\n\nWith the code signing files ready to go, we have one small change to make in\nXcode. In your project in Xcode, go to the Signing & Capabilities section\nand disable automatically managing code signing. Then, select the\nappropriate provisioning profile and signing certificate from the list based\non your build target. 
The certificates we just generated will show up in\nthat list.\n\n\n![Configure Xcode Provisioning\nProfiles](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/xcode.png)\n\n\nWith all of our code signing configuration in place, we can now move on to\nsetting up the integration with the Apple App Store.\n\n\n## Apple App Store integration\n\nThe final bit of configuration is the Apple App Store integration. To do\nthis, we’ll need to create an API key in App Store Connect. See the\ninstructions here to create and download the key file to your location\nmachine. This key should have the role of App Manager.\n[https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api](https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api)\n\n\nOnce the key is generated, go to Settings, Integrations in your project, and\nclick on the integration for Apple App Store Connect. You’ll be asked to\nsupply the issuer ID and key ID from App Store Connect, along with the key\nfile you just downloaded. With all of that configuration in place, click the\nTest Settings button to ensure everything works. If it gives you an error,\ndouble check your settings and try again. Once it’s working, click Save\nChanges to save and activate the integration. 
\n\n\nWith the integration activated, the following CI variables are added to all\npipelines on protected branches and tags:\n\n\n* `APP_STORE_CONNECT_API_KEY_ISSUER_ID`\n\n* `APP_STORE_CONNECT_API_KEY_KEY_ID`\n\n* `APP_STORE_CONNECT_API_KEY_KEY`\n\n\nThese CI variables can be used by fastlane or any custom tooling to interact\nwith the Apple App Store to upload builds, or perform other API enabled\ntasks.\n\n\n## Fastfile\n\nWith all of our configuration in place, we can now drop in a sample Fastfile\nto show how to perform the build, sign, and release actions.\n\n\nFrom the [sample\nproject](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo),\ncopy the contents of the fastlane/Fastfile and paste it into the Fastfile in\nyour project, replacing the existing content. \n\n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile)\n\n\nThis sample Fastfile contains two lanes, which are actions fastlane can\nexecute. The lanes in this file are `build` and `beta`. \n\n\n### Build\n\nThe build lane will perform just a couple of actions to `setup_ci`, `match`,\nand `build_app`. This will use the development certificate we generated with\nfastlane Match earlier to build and sign the app for development. \n\n\n### Beta\n\nThe beta lane takes a few more steps to `setup_ci`, `match`,\n`app_store_connect_api_key`, `increment_build_number`, `build_app`, and\n`upload_to_testflight`. This lane will use the appstore certificates we\ngenerated with faslane Match earlier to build and sign the app for an\nappstore release. This lane also uses the App Store Connect integration to\nconnect to the app store to determine the next build number to use, and to\nupload the final build to Test Flight. 
\n\n\n### .gitlab-ci.yml\n\nWith the fastlane configuration ready to go, the last step is to hook it up\nto GitLab CI. \n\n\nFrom the [sample\nproject](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo),\ncopy the contents of the `.gitlab-ci.yml` file and paste it into the\nproject. \n\n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml\n)\n\n\nThis is a simplified CI configuration that created two CI jobs to run each\nof the lanes in fastlane on the GitLab macOS shared runners. The build job\nwill run for all CI pipelines and the beta job will only be run on CI\npipelines on the master branch. The beta job is also manually triggered, so\nyou can control when the beta release is pushed to Test Flight. \n\n\nWith all of this in place, commit all of these changes and push them up to\nyour project. The CI pipeline will kick off, and you can see these jobs in\naction. 
\n\n\nCover image by \u003Ca\nhref=\"https://unsplash.com/@john_cameron?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">John\nCameron\u003C/a> on \u003Ca\nhref=\"https://unsplash.com/photos/DgRb7aAGK4k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n\n{: .note}\n",[721,9,917],{"slug":3837,"featured":6,"template":700},"ios-cicd-with-gitlab","content:en-us:blog:ios-cicd-with-gitlab.yml","Ios Cicd With Gitlab","en-us/blog/ios-cicd-with-gitlab.yml","en-us/blog/ios-cicd-with-gitlab",{"_path":3843,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3844,"content":3850,"config":3855,"_id":3857,"_type":14,"title":3858,"_source":16,"_file":3859,"_stem":3860,"_extension":19},"/en-us/blog/ios-publishing-with-gitlab-and-fastlane",{"title":3845,"description":3846,"ogTitle":3845,"ogDescription":3846,"noIndex":6,"ogImage":3847,"ogUrl":3848,"ogSiteName":685,"ogType":686,"canonicalUrls":3848,"schema":3849},"How to publish iOS apps to the App Store with GitLab and fastlane","See how GitLab, together with fastlane, can build, sign, and publish apps for iOS to the App Store.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680470/Blog/Hero%20Images/ios-publishing-cover.jpg","https://about.gitlab.com/blog/ios-publishing-with-gitlab-and-fastlane","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish iOS apps to the App Store with GitLab and fastlane\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-03-06\",\n      }",{"title":3845,"description":3846,"authors":3851,"heroImage":3847,"date":3852,"body":3853,"category":718,"tags":3854},[803],"2019-03-06","_Note: You may also find the blog post [Tutorial: iOS CI/CD with\nGitLab](/blog/ios-cicd-with-gitlab/) from June 2023 helpful._\n\n\nRecently we published a [blog post\n\ndetailing how to get up and running 
quickly with your Android\napp](/blog/android-publishing-with-gitlab-and-fastlane/), GitLab, and\n\n[_fastlane_](https://fastlane.tools). In this edition, let's look at how to\nget\n\na build of an iOS app up and running, including publishing all the way to\n\nTestFlight. To see how cool this can be, check out this [video\n\nof me making a change on an iPad Pro using the GitLab Web\nIDE](https://www.youtube.com/watch?v=325FyJt7ZG8), getting that\n\nbuilt, and then receiving an update to the test version of my application on\nthe\n\nvery same iPad Pro I was using to develop.\n\n\nFor the purposes of this article, we'll be using a [simple Swift iOS\napp](https://gitlab.com/jyavorska/flappyokr)\n\nthat I recorded the video with.\n\n\n## First, a note on Apple Store configuration\n\n\nWhat we're going to need in order to set all of this up is a mobile\napplication set up\n\nin the App Store, distribution certificates, and a provisioning profile that\nties\n\nit all together.\n\n\nMost of the complexity here actually has to do with setting up your signing\n\nauthority for the App Store. Hopefully in most cases this is already good to\ngo\n\nfor you; if you're a new app developer, I'll try to get you started on the\nright\n\ntrack, but the intricacies of Apple certificate management is out of the\nscope of\n\nthis article, and tends to change somewhat frequently. But, this information\n\nshould get you going.\n\n\n### My apps\n\n\nYour application will need to be set up in App Store Connect so you have an\nID\n\nfor your application, which will be used in your `.xcodebuild`\nconfiguration.\n\nYour app profile and ID are what tie together the code builds with pricing\nand\n\navailability, as well as TestFlight configuration for distributing testing\n\napplications to your users. 
Note that you don't need to set up public\ntesting –\n\nyou can use personal testing with TestFlight just fine as long as your\ntesting\n\ngroup is small, and the setup is simpler and requires no additional\napprovals\n\nfrom Apple.\n\n\n### Provisioning profile\n\n\nIn addition to the app setup, you need iOS distribution and development keys\n\ncreated in the Certificates, Identifiers, and Profiles section of the Apple\n\nDeveloper console. Once these certificates are created, you can create a\n\nprovisioning profile to unify everything.\n\n\nAlso note that the user you will authenticate with needs to be able to\ncreate\n\ncertificates, so please ensure that they have that ability or you will see\nan\n\nerror during the [_cert_ and\n_sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\n\nsteps.\n\n\n### Other options\n\n\nThere are several more ways to set up your certificates and profiles than\nthe\n\nsimple method I've described above, so if you're doing something different\nyou may\n\nneed to adapt. 
The most important thing is that you need your `.xcodebuild`\n\nconfiguration to point to the appropriate files, and your keychain needs to\nbe\n\navailable on the build machine for the user that the runner is running as.\nWe're\n\nusing _fastlane_ for signing, so if you run into trouble here or want to\nlearn\n\nmore about your options, take a look at their extensive [code signing\ndocumentation](https://docs.fastlane.tools/codesigning/getting-started/).\n\n\nFor this sample project, I'm using the [_cert_ and\n_sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\n\napproach, but the [match\n\napproach](https://docs.fastlane.tools/codesigning/getting-started/#using-match)\nmay be better for actual enterprise use.\n\n\n## How to set up GitLab and _fastlane_\n\n\n### How to set up your CI/CD runner\n\n\nWith the above information gathered or set up, we can start with configuring\nthe\n\nGitLab runner on a macOS device. Unfortunately, building on macOS is the\nonly\n\nrealistic way to build iOS apps. This is potentially changing in the future;\n\nkeep an eye on projects like [xcbuild](https://github.com/facebook/xcbuild)\nand\n\n[isign](https://github.com/saucelabs/isign), as well as our own internal\nissue\n\n[gitlab-ce#57576](https://gitlab.com/gitlab-org/gitlab-ce/issues/57576) for\n\ndevelopments in this area.\n\n\nIn the meantime, setting up the runner is fairly straightforward. You can\nfollow\n\nour most current [instructions for setting up GitLab Runner on\nmacOS](https://docs.gitlab.com/runner/install/osx.html)\n\nto get that up and running.\n\n\nNote: Be sure to set your GitLab runner to use the `shell` executor. For\nbuilding iOS on\n\nmacOS, it's a requirement to operate directly as the user on the machine\nrather\n\nthan using containers. Note that when you're using the shell executor, the\n\nbuild and tests run as the identity of the runner logged in user, directly\non\n\nthe build host. 
This is less secure than using container executors, so\nplease\n\ntake a look at our [security implications\ndocumentation](https://docs.gitlab.com/runner/security/#usage-of-shell-executor)\n\nfor additional detail on what to keep in mind in this scenario.\n\n\n```\n\nsudo curl --output /usr/local/bin/gitlab-runner\nhttps://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-darwin-amd64\n\nsudo chmod +x /usr/local/bin/gitlab-runner\n\ncd ~\n\ngitlab-runner install\n\ngitlab-runner start\n\n```\n\n\nWhat you need to be careful about here is ensuring your Apple keychain is\nset up\n\non this host and has access to the keys that Xcode needs in order\n\nto build. The easiest way to test this is to log in as the user that will be\n\nrunning the build and try to build manually. You may receive system prompts\nfor\n\nkeychain access which you need to \"always allow\" for CI/CD to work. You will\nprobably\n\nalso want to log in and watch your first pipeline or two to make sure that\n\nno prompts come up for additional keychain access. Unfortunately Apple does\nnot\n\nmake this super easy to use in unattended mode, but once you have it working\nit\n\ntends to stay that way.\n\n\n### _fastlane_ init\n\n\nIn order to start using _fastane_ with your project, you'll need to run\n\n`fastlane init`. 
Simply follow the [instructions\n\nto install and run\n_fastlane_](https://docs.fastlane.tools/getting-started/ios/setup/), being\nsure to use the instructions in the\n\n[Use a\nGemfile](https://docs.fastlane.tools/getting-started/ios/setup/#use-a-gemfile)\n\nsection, since we do want this to run quickly and predictably via unattended\nCI.\n\n\nFrom your project directory, you can run the following commands:\n\n\n```\n\nxcode-select --install\n\nsudo gem install fastlane -NV\n\n# Alternatively using Homebrew\n\n# brew cask install fastlane\n\nfastlane init\n\n```\n\n\n_fastlane_ will ask you for some basic configuration and then create a\nproject folder\n\ncalled `fastlane` in your project which will contain three files:\n\n\n#### 1. `fastlane/Appfile`\n\n\nThis file is straightforward, so you just want to check to make sure that\nthe Apple\n\nID and app ID that you set up earlier are correct.\n\n\n```\n\napp_identifier(\"com.vontrance.flappybird\") # The bundle identifier of your\napp\n\napple_id(\"your-email@your-domain.com\") # Your Apple email address\n\n```\n\n\n#### 2. `fastlane/Fastfile`\n\n\nThe `Fastfile` defines the build steps. Since we're using a lot of the\nbuilt-in\n\ncapability of _fastlane_ this is really straightforward. We create a single\n\nlane which gets certificates, builds, and uploads the new build to\nTestFlight.\n\nOf course, you may want to split these out into different jobs depending on\nyour\n\nuse case. 
Each of these steps, `get_certificates`,\n`get_provisioning_profile`,\n\n`gym`, and `upload_to_testflight` are pre-bundled actions already included\nwith\n\n_fastlane_.\n\n\n`get_certificates` and `get_provisioning_profile` are actions associated\nwith\n\nthe [_cert_ and\n_sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\n\napproach to code signing; if you're using _fastlane_\n[match](https://docs.fastlane.tools/codesigning/getting-started/#using-match)\n\nor some other approach you may need to update these.\n\n\n```yaml\n\ndefault_platform(:ios)\n\n\nplatform :ios do\n  desc \"Build the application\"\n  lane :flappybuild do\n    get_certificates\n    get_provisioning_profile\n    gym\n    upload_to_testflight\n  end\nend\n\n```\n\n\n#### 3. `fastlane/Gymfile`\n\n\nThis `gym` file is optional, but I created it manually in order to override\nthe default\n\noutput directory and place the output in the current folder. This makes\nthings a\n\nbit easier for CI. You can read more about `gym` and its options in the\n\n[gym documentation](https://docs.fastlane.tools/actions/gym/).\n\n\n```yaml\n\noutput_directory(\"./\")\n\n```\n\n\n### Our `.gitlab-ci.yml` configuration file\n\n\nNow, we have a CI/CD runner associated with our project so we're ready to\ntry a\n\npipeline. Let's see what's in our `.gitlab-ci.yml` file:\n\n\n```yaml\n\nstages:\n  - build\n\nvariables:\n  LC_ALL: \"en_US.UTF-8\"\n  LANG: \"en_US.UTF-8\"\n  GIT_STRATEGY: clone\n\nbuild:\n  stage: build\n  script:\n    - bundle install\n    - bundle exec fastlane flappybuild\n  artifacts:\n    paths:\n    - ./FlappyBird.ipa\n```\n\n\nYes, that's really it! 
[We set UTF-8 locale for _fastlane_ per their\n\nrequirements](https://docs.fastlane.tools/getting-started/ios/setup/#set-up-environment-variables),\n\nuse a `clone` strategy with the `shell` executor to ensure we have a clean\n\nworkspace each build, and then simply call our `flappybuild` _fastlane_\ntarget,\n\nwhich we discussed above. This will build, sign, and deploy the latest build\nto\n\nTestFlight.\n\n\nWe also gather the artifact and save it with the build – note that the\n`.ipa`\n\nformat output is a signed ARM executable, so not something you can run in\nthe\n\nsimulator. If you wanted a simulator output to be saved with the build, you\n\nwould simply add a build target that produces it and then add it to the\nartifact\n\npath.\n\n\n### Other environment variables\n\n\nThere are some special environment variables behind the scenes here that are\n\nmaking this work.\n\n\n#### `FASTLANE_APPLE_APPLICATION_SPECIFIC_PASSWORD` and `FASTLANE_SESSION`\n\n\nIn order to authenticate against the App Store for the TestFlight upload,\n\n_fastlane_ must be able to authenticate. In order to do this, you need to\n\ncreate an app-specific password to be used by CI. You can read more about\nthis\n\nprocess in [this\ndocumentation](https://docs.fastlane.tools/best-practices/continuous-integration/#use-of-application-specific-passwords-and-spaceauth).\n\n\nIf you're using two-factor authentication, you'll also need to generate the\n\n`FASTLANE_SESSION` variable – instructions are in the same place.\n\n\n#### `FASTLANE_USER` and `FASTLANE_PASSWORD`\n\n\nIn order for [_cert_ and\n_sigh_](https://docs.fastlane.tools/codesigning/getting-started/#using-cert-and-sigh)\n\nto be able to fetch the provisioning profile and certificates on demand, the\n\n`FASTLANE_USER` and `FASTLANE_PASSWORD` variables must be set. 
You can read\nmore\n\nabout this\n[here](https://docs.fastlane.tools/best-practices/continuous-integration/#environment-variables-to-set).\n\nYou may not need these if you are using some other approach to signing.\n\n\n## In closing...\n\n\nRemember, you can see a working project with all of this set up by heading\nover\n\nto my [simple demo app](https://gitlab.com/jyavorska/flappyokr).\n\n\nHopefully this has been helpful and has inspired you to get iOS builds and\n\npublishing working within your GitLab project. There is some good additional\n\n[CI/CD\nbest-practice](https://docs.fastlane.tools/best-practices/continuous-integration/)\n\ndocumentation for _fastlane_ if you get stuck anywhere,\n\nand you could also consider using the `CI_BUILD_ID` (which increments each\nbuild)\n\nto [automatically increment a\nversion](https://docs.fastlane.tools/best-practices/continuous-integration/gitlab/#auto-incremented-build-number).\n\n\nAnother great capability of _fastlane_ to try is the ability to\n\n[automatically generate\nscreenshots](https://docs.fastlane.tools/getting-started/ios/screenshots/)\n\nfor the App Store – it's just as easy to set up as the rest of this has\nbeen.\n\n\nWe'd love to hear in the comments how this is working for you, as well as\nyour\n\nideas for how we can make GitLab a better place to do iOS development in\ngeneral.\n\n\nPhoto by eleven_x on [Unsplash](https://unsplash.com/photos/lwaw_DL09S4)\n\n{: .note}\n",[9,232,695],{"slug":3856,"featured":6,"template":700},"ios-publishing-with-gitlab-and-fastlane","content:en-us:blog:ios-publishing-with-gitlab-and-fastlane.yml","Ios Publishing With Gitlab And 
Fastlane","en-us/blog/ios-publishing-with-gitlab-and-fastlane.yml","en-us/blog/ios-publishing-with-gitlab-and-fastlane",{"_path":3862,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3863,"content":3868,"config":3873,"_id":3875,"_type":14,"title":3876,"_source":16,"_file":3877,"_stem":3878,"_extension":19},"/en-us/blog/it-automation-developer-productivity",{"title":3864,"description":3865,"ogTitle":3864,"ogDescription":3865,"noIndex":6,"ogImage":3552,"ogUrl":3866,"ogSiteName":685,"ogType":686,"canonicalUrls":3866,"schema":3867},"How IT automation impacts developer productivity","See how IT automation promotes a healthier IT culture and unlocks next-level DevOps.","https://about.gitlab.com/blog/it-automation-developer-productivity","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How IT automation impacts developer productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-30\",\n      }",{"title":3864,"description":3865,"authors":3869,"heroImage":3552,"date":3870,"body":3871,"category":718,"tags":3872},[715],"2019-05-30","\n\nQuestion: If developers spend the bulk of their days on painful, manual tasks, would you say that’s the best use of their time? In a development environment that is always [trying to do more with less](/topics/devops/reduce-devops-costs/), manual processes are productivity killers.\n\nAutomation makes it possible for engineering talent to use their skills on projects that add real business value and contribute to long-term growth. In the world of QA, test automation is creating a modern strategy [focused on excellent user experiences](/blog/trends-in-test-automation/). 
IT automation makes it possible to deploy applications faster and increase developer productivity, making the DevOps lifecycle more seamless.\n\n\n## The right people doing the right tasks\n\nIT automation ensures businesses have the right people performing the right tasks, and that has some unexpected benefits. Directing developer talent toward strategic initiatives actually creates a healthier DevOps culture. When developers can work on challenges that are more aligned with their role, they’re likely to be happier and more motivated, and that in turn helps with retention. One of the top reasons developers leave is because [they feel unchallenged in their work](https://differential.com/insights/why-software-developers-leave-and-best-ways-to-retain-them/). IT automation lets developers use their skills for projects where they’re most suited.\n\nThere’s a cost benefit to IT automation, as well. If you have senior engineers working on basic maintenance, [you’re spending too much on maintenance](https://enterprisersproject.com/article/2017/12/5-factors-fueling-automation-it-now), period. Even if you limit these tasks to junior levels, you’re probably still spending too much. While there's a lot more to automation than reducing costs, it's an undeniable benefit.\n\nIf it can be automated, it probably should be.\n\n\n## Automating for growth\n\nAs organizations innovate and increase their deployments, they’ll need IT architecture that supports that growth. Could engineers manually develop and configure 50, 100, or even 200 servers? Sure. But what about 1,000 or 2,000? That’s where IT automation becomes a necessity for scalable workloads. Putting special focus on the handoffs between processes (where waste most often occurs) is how leaders can identify the best automation opportunities. 
[Value stream mapping](https://www.linkedin.com/pulse/automate-question-ricardo-coelho-de-sousa/) is a method used to uncover what should be fully automated, and what may only need partial automation in the interim.\n\nWithout the right IT automation, growth will undoubtedly suffer as teams need more and more staff to keep up with demand. Automation and collaboration are an essential part of operational efficiency, accelerating delivery, and innovating products. CI/CD is the link that connects developers and operations, and that automation helps developers teams build better software and vastly improves the handoff process.\n\n\n## Minimizing risk\n\nReducing manual work [minimizes the risk of human error](https://techbeacon.com/devops/how-take-architectural-approach-it-automation), which gives IT the ability to focus on mission-critical tasks rather than cleaning up mistakes. IT automation also adds a system of checks and balances, so if a mistake happens, errors can be rolled back painlessly.\n\nAutomation tools and containers can make security more efficient. [Kubernetes](/solutions/kubernetes/) not only manages container deployments, it can also orchestrate security tasks. “You really want automation, orchestration to help manage which containers should be deployed to which hosts … knowing which containers need to access each other; managing shared resources, and monitoring container health,” says Red Hat security strategist Kirsten Newcomer. “[As you scale up your use of containers and microservices, automation soon becomes a core need](https://enterprisersproject.com/it-automation).”\n\nRemoving the human error component gives developers the peace of mind to work at the pace they want.\n\n\n## Keeping up with innovation\n\nSpeaking of speed – in the (not so distant) past, developers had to write docs and notify teammates about changes in the cloud environment, share content about provisioning and de-provisioning, synchronize problems, and exchange emails. 
All of that took time. The fewer barriers developers have between code and deployment, the better.\n\n[DevOps tools have created a buffer that allows developers and operations teams to work independently](https://www.infoworld.com/article/3230285/how-devops-changes-dev-and-ops.html). Automation is just a continuation of that DevOps journey – developers can work in real time, and operations teams still procure hardware and manage servers, but at a larger scale. Automation works best when you have specific objectives in mind.\n\nThe team at Monkton had a goal: The moment code is checked in and reviewed, they wanted the testing, deployment, and the security vulnerability scanning lifecycles automated. They wanted their people to do what they do best but had a hodgepodge of tools that couldn’t work together. They brought in better tools to automate those processes, tied them into GitLab, and now they have the repeatability they need at the speed they want.\n\n[Read their story](/blog/monkton-moves-to-gitlab-customer-story/).\n{: .alert .alert-gitlab-purple .text-center}\n\nIT automation is what makes next-level DevOps possible and gives developers the opportunity to use their skills in ways that add real, long-term value. When organizations automate mundane, manual tasks, they save costs and create a healthy IT culture where developers are challenged and processes are efficient – a real win-win.\n\nAre you ready to explore the benefits of IT automation and increase developer productivity? 
[Just commit](/blog/application-modernization-best-practices/).\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,999,9],{"slug":3874,"featured":6,"template":700},"it-automation-developer-productivity","content:en-us:blog:it-automation-developer-productivity.yml","It Automation Developer Productivity","en-us/blog/it-automation-developer-productivity.yml","en-us/blog/it-automation-developer-productivity",{"_path":3880,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3881,"content":3887,"config":3893,"_id":3895,"_type":14,"title":3896,"_source":16,"_file":3897,"_stem":3898,"_extension":19},"/en-us/blog/its-time-to-upgrade-docker-engine",{"title":3882,"description":3883,"ogTitle":3882,"ogDescription":3883,"noIndex":6,"ogImage":3884,"ogUrl":3885,"ogSiteName":685,"ogType":686,"canonicalUrls":3885,"schema":3886},"It's time to update Docker Engine","Now that Alpine Linux 3.14 is being used by more images, it's time to upgrade Docker Engine to 20.10.6 or newer.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/its-time-to-upgrade-docker-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's time to update Docker Engine\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomasz Maczukin\"}],\n        \"datePublished\": \"2021-08-26\",\n      }",{"title":3882,"description":3883,"authors":3888,"heroImage":3884,"date":3890,"body":3891,"category":718,"tags":3892},[3889],"Tomasz Maczukin","2021-08-26","\n\n[Alpine Linux](https://alpinelinux.org/) distribution is the base OS used by many Linux container images. 
It provides a handy packaging mechanism, new versions of software, and a quick and predictable release cycle – all while being distributed using a minimal image size. It's used by many very popular container images, for example `docker:dind`,\n[widely used in GitLab CI/CD workloads](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) handling container\nimages building and management in the jobs.\n\nOn June 15, 2021, Alpine Linux released version 3.14. [As documented in the release notes](\nhttps://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2), changes in the `musl` library require\nan updated version of [runc](https://github.com/opencontainers/runc) or updated version of\n[Docker](https://github.com/moby/moby) for the Alpine 3.14-based images to work properly.\n\nSoftware products across the computer industry have started migrating their Alpine Linux-based container images to 3.14 since it includes significant updates for various network and security-oriented use cases. In cases where the GitLab Runner environment uses a Docker version older than 20.10.6 to handle new container images based on Alpine 3.14, CI/CD jobs may encounter unexpected problems during execution and cause jobs to fail.\n\n[We encountered this problem](https://gitlab.com/gitlab-org/gitlab/-/issues/335641) at GitLab a few weeks ago, when\nthe `ruby:2.7` image was migrated to use Alpine Linux 3.14 as the base. We used a quick workaround to unlock our\npipelines by explicitly tagging the Alpine 3.13 version of the image (fortunately, it was provided!). 
To fully\nresolve the problem for all GitLab.com users who use our instance runners, we pushed forward an update to our autoscaled\nVMs base image, which included an update of Docker Engine.\n\n\nOne of the popular and widely used container images that is migrating to Alpine 3.14 [are the `docker` and \n`docker:dind` images](https://github.com/docker-library/docker/pull/317).\nWhat's important is the change will rebuild\nand re-push the existing specific images for supported versions, like `docker:20.10-dind`. This means users\nwho pinned their version of the Docker-in-Docker service in their `.gitlab-ci.yml` files will still get the image\nversion updated to Alpine 3.14. Using a Docker Engine older than 20.10.6 will probably create\nproblems for the user.\n\n## What's the solution?\n\nThe real solution is to upgrade the execution environment accordingly to Alpine's release notes, which state:\n\n> Therefore, Alpine Linux 3.14 requires **at least one** of the following:\n>\n> 1. runc v1.0.0-rc93\n>    - If using Docker's Debian repositories, this is part of containerd.io 1.4.3-2\n>    - If using Docker Desktop for Windows or Mac, this is part of Docker Desktop 3.3.0\n> 1. Docker 20.10.0 (which contains [moby commit a181391](https://github.com/moby/moby/commit/a18139111d8a203bd211b0861c281ebe77daccd9))\nor greater, **AND** libseccomp 2.4.4 (which contains backported [libseccomp commit 5696c89](https://github.com/seccomp/libseccomp/commit/5696c896409c1feb37eb502df33cf36efb2e8e01))\nor greater. In this case, to check if your host libseccomp is faccessat2-compatible, invoke\n`scmp_sys_resolver faccessat2`. If `439` is returned, faccessat2 is supported. If `-1` is returned, faccessat2 is not\nsupported. Note that if runc is older than v1.0.0-rc93, Docker must still be at least version 20.10.0, regardless of\nthe result of this command.\n> 1. 
As a workaround, in order to run under old Docker or libseccomp versions,\n[the moby default seccomp profile](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json) should be\ndownloaded and on line 2, `defaultAction` changed to `SCMP_ACT_TRACE`, then `--seccomp-profile=default.json` can be\npassed to dockerd, or `--security-opt=seccomp=default.json` passed to `docker create` or `docker run`. This will cause\nthe system calls to return ENOSYS instead of EPERM, allowing the container to fall back to faccessat.\n>\n> Note: When using nested Docker, **every layer** must meet one of the above requirements, since if\n**any layer** improperly denies the use of faccessat2, Alpine Linux 3.14 will not function correctly.\n\nThere are several ways to solve this problem, but since they depend on a specific configuration, users need to choose the solution that best matches their environment.\n\nAlthough the [release notes](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2) mention Docker 20.10.0 (which brings some needed changes), the release notes also mention that the updated version of libseccomp must be used in this case. For environments that use Docker Engine on Linux, these criteria should be met by Docker Engine 20.10.6 and higher.\n\nFor nested Docker environments (which in the case of GitLab CI/CD mostly means\nthe Docker-in-Docker based jobs) to work properly with images based on Alpine Linux 3.14, both the Docker\nEngine on the Runner's host **AND** the `docker:dind` image must be updated to at least 20.10.6.\n\nTo summarize:\n\n1. Users **using images** based on Alpine Linux 3.14 for their job execution (read: as the value of `image:` or\n`services:` keywords in `.gitlab-ci.yml`) must update Docker Engine on their hosts to version 20.10.6 or higher.\n\n1. 
Users **building images** based on Alpine Linux 3.14 using the Docker-in-Docker approach (read: using\n`services: [docker:X.Y-dind]` and `script: [..., docker build -t my/image ., ...]` in `.gitlab-ci.yml`) must\nalso update the `docker:dind` image version to `docker:20.10.6-dind` or higher.\n\n**For users of GitLab.com instance-level Runners, the upgrade of Docker Engine was completed a few weeks ago. Still, users likely need to update the used Docker-in-Docker service to `docker:20.10.6-dind` or higher.**\n\n## Some temporary workarounds\n\nSince the update of Docker Engine may not be easy in some environments, the only known workaround is to pin used\nimages to versions using Alpine Linux 3.13. As you can see in the [Docker library issue](https://github.com/docker-library/docker/pull/317#issuecomment-880140631), many projects have already found this\nis a problem for their users and provided the versions of images tagged with `-alpine3.13` suffix.\n\nThe Docker-in-Docker case described in this post [was done quite recently](https://github.com/docker-library/docker/pull/327).\nUsers who can't update the Docker Engine on the Runner host or for Docker-in-Docker can temporarily solve\nthe problem by using for example `services: [docker:19.03.15-dind-alpine3.13]`.\n\nRemember that this is only a temporary solution. For example, the official `docker` image\n[have already abandoned the 19.03 line](https://github.com/docker-library/docker/pull/329) and new images for `19.03.x` will\nnot be released.\n\nThe only real, long-term solution is to plan and maintain the upgrade. 
\n\n",[9,939],{"slug":3894,"featured":6,"template":700},"its-time-to-upgrade-docker-engine","content:en-us:blog:its-time-to-upgrade-docker-engine.yml","Its Time To Upgrade Docker Engine","en-us/blog/its-time-to-upgrade-docker-engine.yml","en-us/blog/its-time-to-upgrade-docker-engine",{"_path":3900,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3901,"content":3906,"config":3911,"_id":3913,"_type":14,"title":3914,"_source":16,"_file":3915,"_stem":3916,"_extension":19},"/en-us/blog/jenkins-one-year-later",{"title":3902,"description":3903,"ogTitle":3902,"ogDescription":3903,"noIndex":6,"ogImage":1200,"ogUrl":3904,"ogSiteName":685,"ogType":686,"canonicalUrls":3904,"schema":3905},"Jenkins: One year later","With new acquisitions and the launch of CloudBees SDM, is Jenkins trying to become another all-in-one?","https://about.gitlab.com/blog/jenkins-one-year-later","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins: One year later\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-09-20\",\n      }",{"title":3902,"description":3903,"authors":3907,"heroImage":1200,"date":3908,"body":3909,"category":1040,"tags":3910},[715],"2019-09-20","\n\nIt’s been a little over a year since we wrote about [how GitLab CI compares with the three variants of Jenkins](/blog/how-gitlab-ci-compares-with-the-three-variants-of-jenkins/). How have things changed – and how much has stayed the same?\n\n## Acquisitions\n\nIn April 2019, [CloudBees acquired Electric Cloud](https://www.businesswire.com/news/home/20190418005393/en/CloudBees-Acquires-Market-Leader-Electric-Cloud-Creating), a market leader in continuous delivery. 
This acquisition brought application release automation, continuous delivery, and continuous deployment under the CloudBees umbrella through two of Electric Cloud’s premier products: ElectricFlow and ElectricAccelerator.\n\nThis acquisition came a little more than a year after [CloudBees acquired Codeship](https://techcrunch.com/2018/02/06/cloudbees-acquires-codeship-as-devops-consolidates/), another startup focused on continuous integration and delivery. These investments in continuous delivery tools are all about creating value. Because Jenkins doesn’t have continuous delivery built-in, it has to offer integrations with other tools (or acquire them) in order to offer that functionality. Acquisitions go a little deeper than just setting up an API, and are a lot more expensive. Could the acquisition of these two CD platforms give Jenkins the ability to offer CI/CD in their core product in the future?\n\n## Jenkins X\n\nThere has been a strong push by certain vendors to create a solution for combined CI/CD to match the capabilities of GitLab. GitHub developed GitHub Actions while CloudBees supported the development of Jenkins X, for example. Jenkins X was developed to automate continuous delivery pipelines to Kubernetes and cloud-native environments. 
[According to the Jenkins X website](https://jenkins-x.io/), “Rather than having to have deep knowledge of the internals of Jenkins X Pipeline, Jenkins X will default awesome pipelines for your projects that implement fully CI and CD.”\n\n## JenkinsWorld\n\nIn his [opening Keynote at JenkinsWorld 2018](https://www.youtube.com/watch?v=qE3tfS7k1VI&t=2s), CloudBees CTO Kohsuke Kawaguchi discussed some of the known unreliability of Jenkins and discussed how Cloud Native Jenkins could address some of these problems by removing the single point of failure and creating a more distributed system.\n\nAt JenkinsWorld 2019, [CloudBees offered an early preview of its CloudBees SDM Platform](https://www.businesswire.com/news/home/20190814005028/en/CloudBees-Presents-Software-Delivery-Management-SDM--).\n\nSource code management brings visibility and cross-functional collaboration into the SDLC, something that (until now) CloudBees could only offer through a plug-in. This new platform is a part of the CloudBees objective to be an end-to-end platform.\n\nWhat was most interesting was this quote from Sacha Labourey, CEO and co-founder of CloudBees:\n\n>“Organizations need a way to eliminate silos – to truly realize their vision of becoming software-first companies. This vision is Software Delivery Management and we are building the cohesive system our customers want. It will connect product stakeholders and development teams with the rest of the business, provide the intelligence and insights they all need to build software faster and provide increased value to their customers.”\n\nWe couldn’t agree more. ;)\n\n## A push for consolidation\n\nWith the acquisitions of Codeship and Electric Cloud, as well as the announcement of CloudBees SDM, it’s clear that CloudBees/Jenkins is pushing to be an end-to-end SDLC solution for its users. 
We’re seeing this throughout the industry: Idera purchasing Travis CI, Oracle acquiring Wercker, JFrog’s acquisition of Shippable, and the launch of GitHub Actions just last month. Either through acquisitions or adding new features, [the app development industry is in a push for consolidation](/blog/built-in-ci-cd-version-control-secret/).\n\nToolchains get in the way of organizations enabling faster software delivery and realizing their maximum business impact. Where CloudBees/Jenkins has faltered is in its instability, mainly due to the thousands of third-party plugins it supports and the maintenance headaches they cause. At GitLab, we enable SDM, packaging, delivery, monitoring, and security in the product itself without the plugins.\n\nBecause [transparency is one of our values](https://handbook.gitlab.com/handbook/values/), we proudly display other DevOps tools directly on our website with [head-to-head comparisons](/solutions/jenkins/) so that organizations can know which platform works best for their needs.\n\nCompetition makes everyone else better, and with CloudBees/Jenkins amping up their consolidation efforts, how does that compare to us as an already all-in-one platform? We invite you to join us for a demo so you see how GitLab CI/CD compares to Jenkins firsthand.\n\n[See demo of GitLab CI/CD vs. 
Jenkins](/blog/migrating-from-jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,721],{"slug":3912,"featured":6,"template":700},"jenkins-one-year-later","content:en-us:blog:jenkins-one-year-later.yml","Jenkins One Year Later","en-us/blog/jenkins-one-year-later.yml","en-us/blog/jenkins-one-year-later",{"_path":3918,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3919,"content":3924,"config":3929,"_id":3931,"_type":14,"title":3932,"_source":16,"_file":3933,"_stem":3934,"_extension":19},"/en-us/blog/jenkins-to-gitlab-migration-made-easy",{"title":3920,"description":3921,"ogTitle":3920,"ogDescription":3921,"noIndex":6,"ogImage":3435,"ogUrl":3922,"ogSiteName":685,"ogType":686,"canonicalUrls":3922,"schema":3923},"Jenkins-to-GitLab migration made easy","Learn why and how to migrate from Jenkins to GitLab with ease by following this step-by-step guide.","https://about.gitlab.com/blog/jenkins-to-gitlab-migration-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins-to-GitLab migration made easy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-02-01\",\n      }",{"title":3920,"description":3921,"authors":3925,"heroImage":3435,"date":3926,"body":3927,"category":741,"tags":3928},[1775],"2024-02-01","GitLab is the most comprehensive AI-powered DevSecOps platform. This means\nthat GitLab provides everything needed to plan, develop, and deliver secure\nsoftware faster, all within one tool.\n\n\nPlatforms take away the pains and struggles of integrating various tools\n(DIY DevOps) to enable the software development lifecycle (SDLC). Since\nJenkins is not a platform, additional tools are required to complete the\nSDLC. 
This DIY DevOps approach introduces toolchain complexity, which\ncreates the following drawbacks:\n\n\n- Custom support is required for the integration and orchestration of tools\n\n- Difficulty maintaining/upgrading/securing separate tools\n\n- Inefficiency in measuring organizational transformation\n\n- Poor developer experience\n\n- Additional management/time/budget costs\n\n- Loss of productivity\n\n- Context switching and collaboration inefficiencies\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175993/Blog/ikr97sr9jclddeqdg7ew.png\" alt=\"Import project selection\">\n   \u003Cfigcaption>DIY DevOps vs. DevSecOps Platform\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nFor these reasons, many Jenkins teams are considering migrating to a\nDevSecOps platform. If you are looking for a more powerful, reliable, and\nsecure solution, GitLab is your best option! GitLab is free to get started\nwith and offers different subscription tiers based on the needs of your\norganization. To learn more about our offerings and features, check out our\n[pricing page](https://about.gitlab.com/pricing/).\n\n\nIn this blog, you will learn:\n\n- How to plan for a migration\n\n- How to migrate repositories from other source code management (SCM) tools\nto GitLab\n\n- How to migrate CI/CD pipelines from Jenkins to GitLab\n\n- Additional migration considerations\n\n\n### Planning for a migration\n\n\nBefore starting a migration from another tool to GitLab CI/CD, you should\nbegin by developing a migration plan. A migration plan is an important\ntechnical step for setting expectations. CI/CD tools differ in approach,\nstructure, and technical specifics, meaning that migrations are not just 1:1\nmappings of data. 
A migration plan will provide the following benefits:\n\n- Sets and communicates a clear vision of what your migration goals are,\nwhich helps your users understand why the effort is worth it. The value is\nclear when the work is done, but people need to be aware while it’s in\nprogress too.\n\n- Provides sponsorship and alignment from the relevant leadership teams,\nwhich helps with the point above.\n\n- Spends time educating users on what’s different.\n\n- Finds ways to sequence or delay parts of the migration and prevent\nnon-migrated (or partially migrated) states from persisting for too long.\n\n- Documents advantages of the improvements that GitLab CI/CD offers, and\nupdates your implementation as part of the transition.\n\n\nA migration plan will allow you to put a process in place where you can\nslowly migrate to GitLab with minimal disruption. This may include running\nboth Jenkins and GitLab, while certain projects are moved to GitLab and\noffloaded from Jenkins.\n\n\n### Defining a change management process\n\n\nThe migration plan should define an effective change management process.\nDevelopers, IT Operations, Cloud Administrators, Security, and Quality\nEngineers may not have experience with GitLab and they may not know why you\nor your leadership have decided to move in this direction.\n\n\nThe people this is impacting need to know:\n\n- __Why__ the change is being made\n\n- __What__ the future state looks like\n\n- __How__ the company intends to get there from here\n\n- __Where__ to go for more information or help \n\n\nTo this end, you should consider the following steps to manage change across\nthese functional roles: \n\n- __Analyze the current state__: Document the current state of processes.\nGather metrics as a baseline. Identify what's working and not working with\nCI/CD by interviewing key team members. Document the challenges you uncover\nboth quantitatively and qualitatively. 
You’re going to have to sell the\nvision and reason for the change, so the more clearly you can define the\nproblem set, the easier it will be to gain buy-in from across the business. \n\n- __Establish a vision__: Now that you have current pain points outlined\nquantitatively with baseline metrics and qualitatively (in the words of your\nteam members), communicate a vision of the future state. Explain why it's\nimportant (tie this to business success metrics). Provide live and recorded\ndemonstrations of what good looks like and compare it to the current state.\nReinforce this message through multiple channels and media — chat groups,\nall-hands meetings, email notifications, banner notifications on GitLab,\netc.\n\n- __Educate the workforce__: Invest in [GitLab CI/CD\nTraining](https://about.gitlab.com/services/education/gitlab-ci/) delivered\nby a GitLab expert. Measure knowledge acquisition and retention using\n[GitLab Certifications](https://levelup.gitlab.com/pages/certifications). \n\n- __Communicate roadmap and resources__: Communicate to your team members\nthe intended timeline, available resources to help them transition, and\ncommunity resources like chat groups, Q&A boards, or GitLab Influencer\noffice hours so they can ask questions and get help. Bonus points for\nbuilding a reward system to incentivize teams to transition early and share\ntheir experience with their peer application groups!\n\n\nIf you have these elements in place as you begin this transition, you will\nhave a framework for success. \n\n\n### Establishing migration goals\n\nBefore performing a migration, you should have a good understanding of your\ngoals and how to meet them. 
For example, some questions you should have\nanswers to are as follows:\n\n- What is your timeline to migrate?\n\n- How is your Jenkins server currently configured?\n\n- How many projects must be migrated?\n\n- What is the complexity of your pipeline?\n\n- Does it require external dependencies, multiple pipeline triggers,\nparallel builds, etc.?\n\n- How/Where do you deploy your code?\n\n- What is the release/review process for deploying code?\n\n- Is it integrated into Jenkins, or a separate workflow triggered by\nJenkins?\n\n- Which build artifacts or binaries are required for pipeline success?\n\n- Which plugins are used by jobs in Jenkins today?\n\n- Which software is installed on the Jenkins agents?\n\n- What SCM solution are you currently using?\n\n- Are there any shared libraries in use within your Jenkins jobs?\n\n- Which authentication method is used for Jenkins (Basic auth, LDAP/AD,\nSSO)?\n\n- Are there other projects that you need to access from your pipeline?\n\n- Are there credentials in Jenkins used to access outside services?\n\n\nBy answering these questions you’ll know how to proceed with the migration,\nhow long it will take, and where to start. Once you have built a plan and\nare confident of the expectations and possible pitfalls, you can begin the\nmigration process.\n\n\n### Prerequisites for migration\n\nOnce you have created a migration plan and addressed all the expectations of\nthe migration, you can begin to set up GitLab. Some of the prerequisites\nsuggested for migration are as follows:\n\n- Get familiar with GitLab. 
Read about the [key GitLab CI/CD\nfeatures](https://docs.gitlab.com/ee/ci/index.html).\n\n- Follow tutorials to create your first [GitLab\npipeline](https://docs.gitlab.com/ee/ci/quick_start/index.html) and [more\ncomplex pipelines](https://docs.gitlab.com/ee/ci/quick_start/tutorial.html)\nthat build, test, and deploy a static site.\n\n- Review the [.gitlab-ci.yml keyword\nreference](https://docs.gitlab.com/ee/ci/yaml/index.html).\n\n- Set up and configure GitLab.\n\n- Test your GitLab instance.\n\n\nOnce you understand GitLab and an instance has been configured, you can work\nthrough your migration plan and begin to move projects from Jenkins over to\nGitLab. Make sure your GitLab instance has been properly set up using GitLab\nbest practices and [reference\narchitectures](https://docs.gitlab.com/ee/administration/reference_architectures/).\n\n\n### Migrating repositories to GitLab\n\nOne of the main drawbacks of Jenkins is that it does not provide an SCM\nsolution. If you are using Jenkins, your code must be stored in a separate\nSCM solution which Jenkins must have access to. Because GitLab has built-in\nSCM, migrating away from Jenkins also allows you to migrate from the SCM\nsolution you were leveraging, bringing forth an additional reduction in\ncosts.\n\n\nGitLab provides tools to allow you to easily move your repository and its\nmetadata into GitLab. 
The following importers are included to assist in\nmigrating your projects to GitLab:\n\n\n- [GitHub](https://docs.gitlab.com/ee/user/project/import/github.html)\n\n- [Another GitLab\ninstance](https://docs.gitlab.com/ee/user/project/settings/import_export.html)\n\n- [Bitbucket\nCloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n\n- [Bitbucket\nServer](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n\n- [FogBugz](https://docs.gitlab.com/ee/user/project/import/fogbugz.html)\n\n- [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html)\n\n- [Jira (Issues\nonly)](https://docs.gitlab.com/ee/user/project/import/jira.html)\n\n- [Repo by manifest\nfile](https://docs.gitlab.com/ee/user/project/import/manifest.html)\n\n- [Repo by\nURL](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html)\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176002/Blog/ie2xrexhbcoq6m8rnhit.png\" alt=\"GitHub to GitLab Repo Exporter\">\n   \u003Cfigcaption>GitHub to GitLab Repo Exporter\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nEach importer imports different data from a project. 
Read the [import and\nmigrate projects\ndocumentation](https://docs.gitlab.com/ee/user/project/import/) to learn\nmore about the provided importers to see what data is migrated to GitLab.\nAdditionally, you can [automate group and project\nimport](https://docs.gitlab.com/ee/user/project/import/#automate-group-and-project-import)\nand build a custom solution to further suit the needs of your organization:\n\n\n- [Professional Services](https://about.gitlab.com/services/)\n\n- [Migration\nUtilities](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate/-/blob/master/docs/using-congregate.md#quick-start)\n\n- [Frequently Asked Migration\nQuestions](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/congregate/-/blob/master/customer/famq.md)\n\n\n### How to migrate a repository\n\nMigrating a repository to GitLab is easy using our built-in importers. In\nthis example, I’ll show how to copy a repo from GitHub to GitLab along with\n[its\nresources](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data)\n(Issues, Pull Requests, Milestones, etc.). In order to migrate a repository\nfrom GitHub to GitLab, you can follow the steps below:\n\n\n1. On the left sidebar, at the top, select **Create new (+)**.\n\n2. Select **New project/repository** under the In GitLab section.\n\n3. Select **Import project**.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176017/Blog/boowmmaqhbredxa3g92s.png\" alt=\"Import project selection\">\n   \u003Cfigcaption>Import project selection\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n4. 
Click the **GitHub** button.\n    - If using GitLab self-managed, then you must [enable the GitHub importer](https://docs.gitlab.com/ee/administration/settings/import_and_export_settings.html#configure-allowed-import-sources).\n    - Note that other importers can be initiated in the same way.\n5. Now you can either:\n    - Authorize with GitHub OAuth: Select **Authorize with GitHub**.\n    - Or, use a GitHub personal access token:\n       - Go to [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new).\n       - In the **Note** field, enter a token description.\n       - Select the repo scope.\n       - Optionally to import collaborators, select the              **read:org** scope.\n       - Click the **Generate token** button.\n       - On the GitLab import page, in the Personal Access Token field, paste the GitHub personal access token.\n6. Click the **Authenticate** button.\n\n7. Select the items you wish to migrate.\n\n8. Select the projects you wish to migrate and to where.\n\n9. Press the **Import** button.\n\n\nNow you should have the imported project in your workspace. For additional\nguidance on migrating from GitHub to GitLab you can watch this video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs?si=TQ5HI9aMwtzJMiMi\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nOnce you have completed the repository migration, you can set your Jenkins\npipeline to leverage the Jenkinsfile within GitLab. 
This can be done by\nsetting the repository URL to your newly imported project via the Jenkins\npipeline configuration menu:\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176020/Blog/mu475liw66abcxbu2g6g.png\" alt=\"Jenkins Pipeline SCM settings\">\n   \u003Cfigcaption>Jenkins Pipeline SCM settings\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nThis is useful for the initial repo migration phase and allows you to use\nboth Jenkins and GitLab in parallel, preventing service disruptions while\nyou work on migrating the CI/CD functionality.\n\n\nAdditionally, you can leverage the [GitLab Jenkins\nplugin](https://plugins.jenkins.io/gitlab-plugin/) to assist with migration.\nThis plugin allows GitLab to trigger and obtain the status of Jenkins\nbuilds.\n\n\n### Migrating CI/CD pipelines\n\nOnce you have migrated your repositories to GitLab, you can proceed to\nmigrate your Jenkins pipelines to GitLab. This process can be fairly\nstraightforward, but requires an understanding of both Jenkins and GitLab\nconcepts and syntax.\n\n\nJenkins provides two different types of syntax for defining pipelines,\nDeclarative and Scripted. In this guide we will be covering migrating from\nDeclarative pipelines since they are the most commonly used.\n\n\n### Step-by-step pipeline migration\n\nIn this tutorial we will analyze a Jenkinsfile (Groovy) alongside a GitLab\nCI/CD configuration file (YAML) that builds, tests, and deploys a\nmicroservice written in Golang. We will then proceed to enable the pipeline\nwithin GitLab and see its results. 
The pipeline will:\n\n\n- Use the golang container image with the **alpine** tag\n\n- Run a job for building the Golang code into an executable binary\n   - Stores the built executable as an artifact\n- Run a job to run unit tests\n\n- Run a job to deploy to staging\n   - Only executes if the commit targets the **staging** branch\n   - Starts after the **test** stage succeeds\n   - Uses the built executable artifact from the earlier job\n\nBelow you can see Jenkins and GitLab pipeline definitions along with\ndescriptive comments. You can see the pipeline in action in the [Meow\nMigration\nproject](https://gitlab.com/gitlab-de/projects/blogs/meow-migration).\n\n\nLet's take a look at a Jenkinsfile written in Groovy:\n\n\n```  \n\n// The top-level of the declarative\n\n// pipeline.\n\npipeline {\n\n  // Defines the default agent to use\n  // when it is not explicitly defined\n  // in a job.\n    agent any\n\n  // Defines the stages that will run\n  // in numerical order. Each stage\n  // only runs one job.\n    stages {\n\n    // Defines the name of the stage\n        stage('build') {\n      // Defines the container image to\n      // use for this job, overwriting\n      // the default 'agent any'.\n      // The Jenkins Docker plugin\n      // must be configured for this\n      // to run.\n            agent { docker 'golang:alpine' }\n\n      // Defines the sequence of steps\n      // to execute when the stage is\n      // run.\n            steps {\n                sh 'go build -o bin/meow-micro'\n                sh 'chmod +x bin/meow-micro'\n            }\n\n      // The steps to run after the\n      // stage completes.\n            post {\n              always {\n\n        // Stores the stage artifacts\n        // generated for use in another\n        // job.\n                archiveArtifacts artifacts: 'bin/meow-micro'\n                onlyIfSuccessful: true\n              }\n            }\n        }\n\n    stage('test') {\n            agent { docker 
'golang:alpine' }\n            steps {\n                sh 'go test .'\n            }\n        }\n\n        stage('deploy') {\n      // Defines conditions which must\n      // be met in order for the job to\n      // execute. In this case the\n      // deploy job will only run on the \n      // staging branch.\n            when {\n              branch 'staging'\n            }\n            steps {\n                echo 'Deploying meow-micro to staging'\n        // Uses the artifact stored in\n        // the build stage.\n                sh './bin/meow-micro'\n            }\n        }\n    }\n}\n\n```\n\n\nNow, let's see how to create the same functionality in GitLab:\n\n\n```\n\n# Defines the default image to use\n\n# when it is not explicitly defined in\n\n# a job.\n\ndefault:\n  image: alpine:latest\n\n# Defines the order to run the stages.\n\n# Each stage can have multiple jobs.\n\nstages:\n  - build\n  - test\n  - deploy\n\n# Defines the name of the job\n\ncreate-binary:\n # Defines the stage the job will run in\n  stage: build\n # Defines the container image to use\n # for this job, overwriting default.\n  image: golang:alpine\n # Defines the sequence of steps to\n # execute when the job is run.\n  script:\n    - go build -o bin/meow-micro\n    - chmod +x bin/meow-micro\n # Stores the job artifacts generated\n # for use in another job.\n  artifacts:\n    paths:\n      - bin/meow-micro\n    expire_in: 1 week\n\nunit-tests:\n  stage: test\n  image: golang:alpine\n  script:\n    - go test .\n # Defines commands to run after the\n # job.\n after_script:\n  - echo \"Tests Complete\"\n\nstaging-deploy:\n  stage: deploy\n # Defines commands to run before the\n # actual job.\n  before_script:\n    - apk update\n  script:\n    - echo \"Deploying meow-micro to staging environment\"\n    - ./bin/meow-micro\n # Defines conditions which must be met\n # in order for this job to execute. 
In\n # this case the staging-deploy job will \n # only run on the staging branch.\n  rules:\n    - if: $CI_COMMIT_BRANCH == 'staging'\n # Allows the artifact stored in the\n # build job to be used in this job.\n  artifacts:\n    paths:\n      - bin/meow-micro\n```\n\n\nAs you may have observed, there are many similarities between both Jenkins\nand GitLab in terms of syntax, making pipeline migration straightforward.\nWhile the above provides a basic example, be sure to read the comprehensive\nlist of [feature and concept\ncomparisons](https://docs.gitlab.com/ee/ci/migration/jenkins.html#comparison-of-features-and-concepts)\nbetween both tools.\n\n\nNow that we have an understanding of how to map Jenkins to GitLab we can\nstart creating a pipeline with the same functionality in GitLab. In order to\nperform the migration of CI/CD, you can go through the following steps:\n\n\n##### 1. Open the repository you migrated to GitLab in the section above.\n\n- On the left sidebar, at the top, select **Search or go to…**.\n\n- Locate your project.\n\n\n##### 2. Open the [Pipeline\nEditor](https://docs.gitlab.com/ee/ci/pipeline_editor/).\n\n- On the left sidebar, Select **Build > Pipeline editor**.\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176026/Blog/ecp4jh7epho2oxuegaor.png\" alt=\"Pipeline editor menu\">\n   \u003Cfigcaption>Pipeline editor menu\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n- Click the **Configure pipeline** button.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176029/Blog/nypfh01zhwgvzqc0xz3v.png\" alt=\"Configure pipeline selection\">\n   \u003Cfigcaption>Configure pipeline selection\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n##### 3. Populate the [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/).\n\n- Add the GitLab CI pipeline code. 
\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176031/Blog/nxi6uxxispyyoiiyvxyg.png\" alt=\"Pipeline editor input\">\n   \u003Cfigcaption>Pipeline editor input\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n- Verify that the syntax is correct.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176037/Blog/x3d4utfsnymye0lvphtf.png\" alt=\"Pipeline syntax validation\">\n   \u003Cfigcaption>Pipeline syntax validation\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n- Visualize the pipeline.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176043/Blog/hipzofpyywjxf62edzfv.png\" alt=\"Pipeline visualization\">\n   \u003Cfigcaption>Pipeline visualization\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n##### 4. Commit the file to the main branch.\n\n- Add a commit message.\n\n- Make sure the branch is set to main.\n\n- Click the Commit changes button.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176048/Blog/nn8bl7rdysabccoycfrk.png\" alt=\"Commit changes dialog\">\n   \u003Cfigcaption>Commit changes dialog\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nOnce the file has been merged, the defined pipeline will kick off. You can\ngo back to your project and [view the\npipeline](https://docs.gitlab.com/ee/ci/pipelines/#view-pipelines) in action\nby selecting it under your project’s **Build > Pipelines** page. 
Since it\nwas run on the **main** branch, you will see only the **create-binary** and\nunit-tests jobs; the **staging-deploy** job only runs on the staging branch.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176051/Blog/wfb4k8nkzpg28kpf2pzz.png\" alt=\"Pipeline running on main branch\">\n   \u003Cfigcaption>Pipeline running on main branch\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nIf we create a staging branch, we can see that the following pipeline is\ninitiated.\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176053/Blog/e2jxedpolaniotgixpby.png\" alt=\"Pipeline running on staging branch\">\n   \u003Cfigcaption>Pipeline running on staging branch\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nWhen clicking on a job we can see its output:   \n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176056/Blog/fywzwbzkwcvc9zzakilh.png\" alt=\"create-binary job output\">\n   \u003Cfigcaption>create-binary job output\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176061/Blog/ekmpd8ecanwwiena9xi9.png\" alt=\"unit-tests job output input\">\n   \u003Cfigcaption>unit-tests job output input\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\n\u003Ccenter>\n\n\u003Cfigure>\n   \u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176065/Blog/h7nqxszy50xdmnvhalfq.png\" alt=\"staging-deploy job output\">\n   \u003Cfigcaption>staging-deploy job output\u003C/figcaption>\n\u003C/figure>\n\n\u003C/center>\n\n\u003Cp>\u003C/p>\n\n\nYou can see how the artifact is stored in the create-binary job and used in\nthe 
staging-deploy job. And that's how easy it is to migrate a pipeline from\nJenkins to GitLab!\n\n\n### Additional considerations when migrating\n\nSome helpful considerations we’ve found to make the deployment process more\nstraightforward are as follows:\n\n\n- Don't try to replicate tasks into GitLab jobs 1:1. Take some inventory and\ntime to understand what the current pipeline is doing, and which problem it\nis solving.\n\n\n- Some Jenkins jobs may be too complex to move over to GitLab right away.\nFor this reason, it may be beneficial to use the [GitLab Jenkins\nplugin](https://plugins.jenkins.io/gitlab-plugin/) to initiate Jenkins\npipelines and view their results directly from GitLab. This allows you to\nslowly migrate certain actions to GitLab until the whole pipeline can be\nmoved.\n\n\n- Implement [security scanners and code\nquality](https://docs.gitlab.com/ee/user/application_security/) using\nbuilt-in templates provided by GitLab from the start. This will allow you to\nshift security left, reducing the potential for a breach.\n\nDon't overcomplicate the CI/CD config and try to use every feature advantage\nat once. Modularize code and implement it in small iterations.\n\n\n- Implement monitoring and governance from the start.\n\n\n- Understand that the GitLab Runner (Go) might behave differently than the\nJenkins agent (Java). CPU usage and memory consumption might differ — make\nsure to compare over time.\n\n\n- Consider investing in auto-scaling mechanisms, and shut down unneeded\nresources on the weekend, or outside of working hours.\n\n\n- Modernize application development by containerizing your jobs. Jenkins\njobs are not executed on a container today but on a Jenkins agent running as\na VM.\n\n\nWhile this list is not exhaustive, it does provide a good start on some\nconsiderations to take note of. 
If you need additional help, GitLab provides\n[professional services](https://about.gitlab.com/get-help/) to support your\nmigration journey.\n\n\n### Learn more\n\nThanks for reading! I hope this guide has helped you get a clear\nunderstanding of why and how to migrate from Jenkins to GitLab. Not\nconvinced? [Give GitLab a try with our free\ntrial](https://about.gitlab.com/free-trial/), and see the value of a\nDevSecOps platform!\n\n\nHere are a few resources where you can learn more about GitLab, the benefits\nof using a DevSecOps platform, and migrating from Jenkins:\n\n\n- [Migrating from\nJenkins](https://docs.gitlab.com/ee/ci/migration/jenkins.html)\n\n- [Planning a\nmigration](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html)\n\n- [GitLab Project\nImporters](https://docs.gitlab.com/ee/user/project/import/)\n\n- [Tutorial: GitHub to GitLab migration the easy\nway](https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy/)\n\n- [Video: GitHub to GitLab migration the easy\nway](https://youtu.be/0Id5oMl1Kqs?feature=shared)\n\n- [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD\nenvironment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)\n",[9,696],{"slug":3930,"featured":91,"template":700},"jenkins-to-gitlab-migration-made-easy","content:en-us:blog:jenkins-to-gitlab-migration-made-easy.yml","Jenkins To Gitlab Migration Made Easy","en-us/blog/jenkins-to-gitlab-migration-made-easy.yml","en-us/blog/jenkins-to-gitlab-migration-made-easy",{"_path":3936,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3937,"content":3942,"config":3948,"_id":3950,"_type":14,"title":3951,"_source":16,"_file":3952,"_stem":3953,"_extension":19},"/en-us/blog/kubecon-na-2019-are-you-about-to-break-prod",{"title":3938,"description":3939,"ogTitle":3938,"ogDescription":3939,"noIndex":6,"ogImage":1200,"ogUrl":3940,"ogSiteName":685,"ogType":686,"canonicalUrls":3940,"schema":3941},"KubeCon NA: Are you about 
to break Prod?","Use Pulumi and GitLab to build a pipeline that validates your application, infrastructure, and deployment process.","https://about.gitlab.com/blog/kubecon-na-2019-are-you-about-to-break-prod","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"KubeCon NA: Are you about to break Prod?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erin Krengel, Pulumi\"}],\n        \"datePublished\": \"2020-01-27\",\n      }",{"title":3938,"description":3939,"authors":3943,"heroImage":1200,"date":3945,"body":3946,"category":783,"tags":3947},[3944],"Erin Krengel, Pulumi","2020-01-27","\n\nA couple of months ago, my [Pulumi](https://www.pulumi.com/) colleague Sean Holung, staff sofware engineer, and I had the opportunity to present [\"Are you about to break prod? Acceptance Testing with Ephemeral Environments\"](https://www.youtube.com/watch?v=jAQhDZiRzBQ) at KubeCon NA 2019. In this talk, we covered what is an ephemeral environment, how to create one, and then we walked the audience through a concrete example. Given our limited time, we had to move quickly through a ton of information. This post will recap our presentation and add a few more details we weren't able to cover.\n\nAs software engineers, our job is to deliver business value. To do this, we need to be delivering software both quickly and reliably.\n\nSo the question we ask you is: are you about to break prod? Everyone will break production at some point because there are things we miss. 
As independent software lead Alexandra Johnson sums up so well in a tweet: \"Failures are part of the cost of building and shipping large systems.\" Building a robust pipeline allows us to move quickly in the case of failure and gain confidence around making changes to our infrastructure and applications.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Big takeaway from \u003Ca href=\"https://twitter.com/hashtag/KubeCon?src=hash&amp;ref_src=twsrc%5Etfw\">#KubeCon\u003C/a>: none of us want to break prod, but failures are part of the cost of building and shipping large systems. Using tools like \u003Ca href=\"https://twitter.com/hashtag/AcceptanceTesting?src=hash&amp;ref_src=twsrc%5Etfw\">#AcceptanceTesting\u003C/a> (\u003Ca href=\"https://twitter.com/eckrengel?ref_src=twsrc%5Etfw\">@eckrengel\u003C/a>) and \u003Ca href=\"https://twitter.com/hashtag/ChaosEngineering?src=hash&amp;ref_src=twsrc%5Etfw\">#ChaosEngineering\u003C/a> (\u003Ca href=\"https://twitter.com/Ana_M_Medina?ref_src=twsrc%5Etfw\">@Ana_M_Medina\u003C/a>) can increase your confidence in your infrastructure changes!\u003C/p>&mdash; Alexandra Johnson (@alexandraj777) \u003Ca href=\"https://twitter.com/alexandraj777/status/1198373475049623552?ref_src=twsrc%5Etfw\">November 23, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nWith this in mind, we use Pulumi and GitLab to build a pipeline that validates both our application, infrastructure, and deployment process. \n\n## Ephemeral environments\n\nWhat is an ephemeral environment? It is a short-lived environment that mimics a production environment. To maintain agility, boundaries are defined in the environment to only encompass the first-level dependencies of the particular microservice that is being deployed. 
It means you don't have to spin up every single microservice or piece of infrastructure that's running in production. Yet you may need to spin up extra pieces of infrastructure to properly test the microservice. For example, you may need to create a subscription to pull from a PubSub topic your microservice writes to. This subscription would allow your acceptance tests to pull from a topic in order to validate an outbound message is published.\n\n## Why this is important\n\nInfrastructure is a key part of an application's behavior. The architecture and requirements are continually evolving. How can you incorporate these into a testing suite to give us a high degree of confidence?\n\nEphemeral environments allow you to integrate infrastructure and deployment processes into a testing suite. They ensure your testing environment is always in-sync with production and therefore allow you to iterate quickly to meet new requirements.\n\nEphemeral environments also encourage you to lean on automated tests over manual tests. If you use ephemeral environments as a replacement for a testing environment, there is not enough time to go in and run a manual check. Shifting your mindset to automated tests can be challenging, yet it's imperative that we do so. Automated tests guarantee your application behaves as expected today as well as months from now when you're out on vacation.\n\n## Our demo application\n\nTo demonstrate the effectiveness of integrating acceptance testing with ephemeral environments into your deployment process, we created a simple demo application. The service is written in Go and accepts a message on the `/message` endpoint, then places it in a storage bucket and sends a notification about the new object on a PubSub topic. The code for this application lives in our [main.go](https://gitlab.com/rocore/demo-app/blob/master/main.go) file. 
While you can walk through this code yourself, the most important thing to call out is that our application is *configurable*. This means we take configuration in at the very beginning of our main function and shut down the application if the values are not present.\n\n```go\nfunc main() {\n    ...\n\t// Get configuration from environment variables. These are\n\t// required configuration values, so we use an helper\n\t// function get the values and exit if the value is not set.\n\tproject := getConfigurationValue(\"PROJECT\")\n\ttopicName := getConfigurationValue(\"TOPIC\")\n\tbucketName := getConfigurationValue(\"BUCKET\")\n    ...\n}\n\nfunc getConfigurationValue(envVar string) string {\n\tvalue := os.Getenv(envVar)\n\tif value == \"\" {\n\t\tlog.Fatalf(\"%s not set\", envVar)\n\t}\n\tlog.Printf(\"%s: %s\", envVar, value)\n\treturn value\n}\n```\n\n### Infrastructure\n\nThere are many pieces of infrastructure to spin up and we can use Pulumi to easily wire it all together. Our architecture looks like this:\n\n![Pulumi Architecture](https://about.gitlab.com/images/blogimages/pulumidemoarch.jpg){: .medium.center}\n\nYou can check out the Pulumi code that we use to reproduce both our ephemeral environments as well as production in the [infrastructure/index.ts](https://gitlab.com/rocore/demo-app/blob/master/infrastructure/index.ts) file. The neat thing about using Pulumi is that we can create the Google Cloud Platform (GCP) resources we need and then directly reference them in our Kubernetes deployment. 
Using Pulumi ensures we're always configuring our application with the correct GCP resources for that environment.\n\nFor example, in our Kubernetes deployment, we set the environment variables by using the topic and bucket variables created just above.\n\n```typescript\n// Create a K8s Deployment for our application.\nconst appLabels = { appClass: name };\nconst deployment = new k8s.apps.v1.Deployment(name, {\n    metadata: { labels: appLabels },\n    spec: {\n        selector: { matchLabels: appLabels },\n        template: {\n            metadata: { labels: appLabels },\n            spec: {\n                containers: [{\n                    ...\n                    env: [\n                        { name: \"TOPIC\", value: topic.name }, // referencing topic just created\n                        { name: \"BUCKET\", value: bucket.name }, // referencing bucket just created\n                        { name: \"PROJECT\", value: project },\n                        {\n                            name: \"GOOGLE_APPLICATION_CREDENTIALS\",\n                            value: \"/var/secrets/google/key.json\"\n                        },\n                    ],\n                    ...\n                }]\n            }\n        }\n    },\n});\n```\n\n### Acceptance tests\n\nThe acceptance tests validate that our service, when stood up, functions as expected. They are run against an ephemeral environment. The tests live in the `acceptance/acceptance_test.go` [file](https://gitlab.com/rocore/demo-app/blob/master/acceptance/acceptance_test.go). You'll notice we're once again using the helper function `getConfigurationValue`. Our acceptance test must also be configured to ensure they're validating against the correct resources for that particular ephemeral environment.\n\nSince the service is only accessible from within the Kubernetes cluster, we use a Kubernetes job to run our acceptance tests. 
Using a Kubernetes job is a good technique to use when your CI is running externally, such as from GitLab, and you do not want to expose your service publicly. Our ephemeral environment plus acceptance test looks like this:\n\n![Acceptance Tests](https://about.gitlab.com/images/blogimages/pulumiacceptancetestarch.jpg){: .medium.center}\n \nWe spin up a Kubernetes Job and additional resources by using an if statement at the bottom of our `infrastructure/index.ts` file. The conditional depends on the environment's name as follows:\n\n```typescript\n// If it's a test environment, set up acceptance tests.\nlet job: k8s.batch.v1.Job | undefined;\nif (ENV.startsWith(\"test\")) {\n    job = acceptance.setupAcceptanceTests({\n        ...\n    });\n}\n\n// Export the acceptance job name, so we can get the logs from our\n// acceptance tests.\nexport const acceptanceJobName = job ? job.metadata.name : \"unapplicable\";\n```\n\nThat covers all the major aspects of our application and infrastructure, and if you'd like to view the code in detail, it is available in our `demo-app` [GitLab repository](https://gitlab.com/rocore/demo-app).\n\n## Our pipeline\n\nWhen developing a new service, we must establish a solid deployment strategy upfront. We want to make sure we're building in quality from day one. As we develop the service, we can add acceptance tests for every feature we add while the context and requirements are still fresh in our minds. This ensures we have thorough coverage of our app's functionality.\n\nWe used GitLab to set up our pipeline. We chose GitLab because it's straightforward to set up and allows us to run our pipeline on our Docker image of choice. We use a [base-image](https://gitlab.com/rocore/global-infra/blob/master/base-image/Dockerfile) that has all our dependencies installed and then reference that Docker image and tag in our `demo-app` pipeline. 
The Docker image allows us to bundle and version the dependencies for building our application and infrastructure.\n\n![GitLab Pipelines](https://about.gitlab.com/images/blogimages/pulumibloggitlabci.png){: .shadow.medium.center}\n \n1. **Test and Build** - This runs our unit tests and builds both our application and acceptance test images. To build our images, we used [Kaniko](https://github.com/GoogleContainerTools/kaniko), a tool for building images within a container or Kubernetes cluster. GitLab has excellent documentation on [how to incorporate Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) into your pipeline. The application image is an immutable image that is used for both running our acceptance tests and deploying to production.\n1. **Acceptance Test** - This is what spins up our ephemeral environments and runs our acceptance tests. This acts as a quality gate catching issues before production.\n\n    Our ephemeral environment and Kubernetes job are all spun up in the `script` portion of the acceptance test job definition. We do a bit of setup for our new acceptance test stack and then run `pulumi up`. Here is the print out from our acceptance tests.\n\n    ```bash\n    ...\n    $ pulumi stack init rocore/$ENV-app\n    Logging in using access token from PULUMI_ACCESS_TOKEN\n    Created stack 'rocore/test-96425413-app'\n    $ pulumi config set DOCKER_TAG $DOCKER_TAG\n    $ pulumi config set ENV $ENV\n    $ pulumi config set gcp:project rocore-k8s\n    $ pulumi config set gcp:zone us-west1-a\n    $ pulumi up --skip-preview\n    Updating (rocore/test-96425413-app):\n    ...\n    Resources:\n        + 16 created\n\n    Duration: 4m10s\n\n    Permalink: https://app.pulumi.com/rocore/demo-app/test-96425413-app/updates/1\n    ```\n\n    The `after_script` destroys our stack as well as prints the logs of both our Kubernetes job and deployment, which help with debugging if our tests were to fail. 
We use the `after_script` to make sure that we always clean up and print logs even when our acceptance tests fail.\n    \n    ```bash\n    ...\n    $ pulumi stack select rocore/$ENV-app\n    $ kubectl logs -n rocore --selector=appClass=$ENV-demo-app-acc-test --tail=200\n    === RUN   TestSimpleHappyPath\n    === RUN   TestSimpleHappyPath/message_is_sent_to_PubSub_topic\n    === RUN   TestSimpleHappyPath/message_is_stored_in_bucket\n    ",[874,1228,827,9,232,278],{"slug":3949,"featured":6,"template":700},"kubecon-na-2019-are-you-about-to-break-prod","content:en-us:blog:kubecon-na-2019-are-you-about-to-break-prod.yml","Kubecon Na 2019 Are You About To Break Prod","en-us/blog/kubecon-na-2019-are-you-about-to-break-prod.yml","en-us/blog/kubecon-na-2019-are-you-about-to-break-prod",{"_path":3955,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3956,"content":3962,"config":3966,"_id":3968,"_type":14,"title":3969,"_source":16,"_file":3970,"_stem":3971,"_extension":19},"/en-us/blog/leading-scm-ci-and-code-review-in-one-application",{"title":3957,"description":3958,"ogTitle":3957,"ogDescription":3958,"noIndex":6,"ogImage":3959,"ogUrl":3960,"ogSiteName":685,"ogType":686,"canonicalUrls":3960,"schema":3961},"Leading SCM, CI and Code Review in one application","The most important tools for developers are SCM, CI and Code Review, and it is better to have them all together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679607/Blog/Hero%20Images/scm-ci-cr.png","https://about.gitlab.com/blog/leading-scm-ci-and-code-review-in-one-application","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Leading SCM, CI and Code Review in one application\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-09-30\",\n      
}",{"title":3957,"description":3958,"authors":3963,"heroImage":3959,"date":2212,"body":3964,"category":978,"tags":3965},[1835],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitLab enables streamlined CI, code reviews and collaboration at proven enterprise scale, making development workflows easier to manage and minimizing context switching required between tools in complex DevOps toolchains. Users can release software faster and outpace the competition with the ability to quickly respond to changes in the market.\n\nWatch this short video (3 minutes) to see a demo of the seamless flow developers having when using SCM, CI and Code Review in GitLab.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/DvuqGA4FhXM\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\nCover image by [NESA by Makers](https://unsplash.com/@nesabymakers) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,1489],{"slug":3967,"featured":6,"template":700},"leading-scm-ci-and-code-review-in-one-application","content:en-us:blog:leading-scm-ci-and-code-review-in-one-application.yml","Leading Scm Ci And Code Review In One Application","en-us/blog/leading-scm-ci-and-code-review-in-one-application.yml","en-us/blog/leading-scm-ci-and-code-review-in-one-application",{"_path":3973,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3974,"content":3980,"config":3986,"_id":3988,"_type":14,"title":3989,"_source":16,"_file":3990,"_stem":3991,"_extension":19},"/en-us/blog/lee-tickett-my-gitlab-journey",{"title":3975,"description":3976,"ogTitle":3975,"ogDescription":3976,"noIndex":6,"ogImage":3977,"ogUrl":3978,"ogSiteName":685,"ogType":686,"canonicalUrls":3978,"schema":3979},"From user, to advocate, to contributor: my GitLab journey","Three years (as a user and as a contributor) with 
GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681735/Blog/Hero%20Images/cover_photo.jpg","https://about.gitlab.com/blog/lee-tickett-my-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From user, to advocate, to contributor: my GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":3975,"description":3976,"authors":3981,"heroImage":3977,"date":3983,"body":3984,"category":978,"tags":3985},[3982],"Lee Tickett","2020-11-13","{::options parse_block_html=\"true\" /}\n\n\n\n\nI have had a passion for technology since before I can remember. Thirteen\nyears ago I took the plunge, quit my day job, and started an IT development\nand support company called [Tickett Enterprises\nLimited](https://www.tickett.net). For the last three years, GitLab has been\na part of my journey.\n\n\n## 3 Years Ago \n\nWe were (and still are) using a helpdesk system we built ourselves. It does\nexactly what we need it to do - and any time it doesn’t, we change it. The\nmost important feature of the system is reporting. Specifically,\nfacilitating our monthly billing process; with a click of a button, we\ngenerate timesheets and invoices for all of our clients.\n\n\nThough I was aware of Git (and GitHub), I had not heard of GitLab. We were\nusing SVN in its most basic form (single repository for all projects and no\nbranching), with an integration so all commits would create notes in our\nhelpdesk.\n\n\n## 2.5 Years Ago\n\nWe decided that SVN was no longer fit for purpose. 
Our top issues were: \n\n* never knowing whether the code in our repository matched what was deployed\n\n* not being able to work collaboratively on projects\n\n* feature/knowledge limitations\n\n* Git was the industry standard \n\n\nWhile most of these issues were due to the way we were using SVN, we were\nkeen to adopt a more popular system. I don’t remember how I found GitLab,\nbut I did, and spun up a local on-prem instance of Community Edition (CE)\nusing separate projects/repositories and basic branching. If you are\nconsidering running a local instance, I recommend the [Bitnami\nappliance/.ova](https://bitnami.com/stack/gitlab).\n\n\nIt took some time to get used to local vs remote and to remember to push as\nwell as commit, but we picked it up pretty quickly.\n\n\n## 2 Years Ago\n\nWe wanted to use GitLab to help us improve our processes so we:\n\n* built a little UI for project creation (using the GitLab API). This\nensures new projects fit our naming standards, contain our standard template\nfiles, have our standard master/test/dev branches, contain the relevant\nmembers, and use our webhooks\n\n* recreated the helpdesk integration we had with SVN (every commit and\ncomment is replicated as a note on our helpdesk)\n\n* unaware of GitLab EE, we created a custom merge request approval process\nusing webhooks. Our master branch is always protected - a merge request\nrequires 2 approvals from 2 distinct reviewers (one for code and one for\nfunctionality)\n\n\n## 1.5 Years Ago\n\nA bit late to the party, but finally we set up the GitLab runner to automate\nour build, spin up our database, execute our unit tests and report test\ndetails and code coverage. GitLab CI for .NET was not as well documented as\nother use cases leading to a lot of trial and error when setting up the\nrunner.\n\n\nWe are using the Windows runner configured to use a standard shell (which I\nthink is no longer supported). 
We will either be moving to powershell on\nwindows or possibly using docker images. Here’s a sample .gitlab-ci.yml\n\n\n```yml\n\nstages:\n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.4\"\n  \nbuild:\n stage: build\n script:\n  - 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\nuget restore'\n  - '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\bin\\msbuild\" /t:Restore,Clean,ReBuild /t:Database:Publish /p:Configuration=Debug;Platform=\"Any CPU\" /p:SqlPublishProfilePath=Database.publish.xml'\n  - 'ping 192.168.99.99 -n 1 -w 10000 2>nul || type nul>nul'\n artifacts:\n  paths:\n   - Tests/bin/\n\ntest:\n stage: test\n script:\n  - 'c:\\GitLab-Runner\\opencover\\OpenCover.Console.exe -returntargetcode:1000 -filter:\"+[*]* -[nunit*]* -[*Tests*]*\" -register -target:\"C:\\Program Files (x86)\\NUnit.org\\nunit-console\\nunit3-console.exe\" -targetargs:\"Tests\\Tests.csproj --result=testresult.xml;transform=C:\\gitlab-runner\\nunit3-junit.xslt\"'\n coverage: '/^Visited Branches .*(\\(\\d+\\.?\\d*\\))/'\n dependencies:\n  - build\n artifacts:\n  reports:\n   junit: testresult.xml\n```\n\n\nWe were building another customization to allow us to search for code across\nall repositories. Unfortunately, we hit a limitation because the API did not\nallow searching anything but the default branch.\n\n\nAt this point, while Googling for help getting CI up and running, I learned\nthat GitLab is open-source. So I thought maybe I could extend the API to\nsupport searching any branch. This lead to [my first\ncontribution](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28069).\n\n\n## 1 Year Ago\n\nAt this point, I was completely new to all of the technologies, techniques,\nand best practices used by GitLab but found myself participating in my first\n[GitLab hackathon](https://about.gitlab.com/community/hackathon/). 
Somehow,\nI managed to take joint first prize!\n\n\nMy first few contributions were achieved by modifying my production GitLab\ninstallation (not ideal). So it was time to get the [GitLab Development Kit\n(GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) up and running.\nThis was certainly not without its challenges (many of which I suspect stem\nfrom me being in the minority of GitLab contributors running Windows).\n\n\nI have since contributed to the [GDK\nproject](https://gitlab.com/gitlab-org/gitlab-development-kit) and joined\nthe GDK office hour calls to help shape the way forward and resolve some of\nthe problems and frustrations.\n\n\nAt this point, I was leearning a lot. Not just about the tools and languages\nbut about the best practices and work ethos within the GitLab team. Better\nyet, I was able to start taking some of these learnings back to the office.\n\n\n## 0.5 Years Ago\n\nI attended GitLab Commit - London 2019. This really helped to confirm my\nsuspicions; we are only scraping the surface of GitLab's capabilities.\n\n\nOn a few occasions, I wondered whether GitLab may not be a good fit for my\ncompany as I watched huge companies like Porsche and Goldman Sachs present.\nA [presentation](https://www.youtube.com/watch?v=t0Eh1sq9r5s) by Huss\nEl-Sheikh from startup 9fin helped ease my concerns.\n\n\nAround this time, I moved from Windows to Ubuntu to make it easier to work\nwith GDK.\n\n\nI continued to learn a lot from my contributions, feedback, and interactions\nwith the GitLab team, again applying what I could back in the office. Much\naround the languages/technologies I hadn’t previously worked with (namely\nruby, postgres and vue), but also other takeaways such as:\n\n* when carrying out code reviews ask questions rather than give instructions\n(“what do you think about x?” is more productive than “change this to y”)\n\n* GitLab CI is capable of automating a lot of what we currently do by hand\n(e.g. 
code review for best practices)\n\n* always try to add tests when making code changes\n\n\nI am a firm believer of documenting processes, decisions, and rationale.\nThere’s nothing worse than someone saying “we do it this way” without being\nable to back that up with reasoning. With that in mind, we implemented Merge\nRequest Templates to ensure our team was consistent in our approach to\ncoding, testing, and releasing.\n\n\nBy now our development team had plenty of experience with GitLab and we were\nstarting to move our support team over. To help our team leads monitor merge\nrequests, we adopted 2 simple departmental labels (`Support`/`Development`)\nand used our webhook engine to ensure every MR is automatically labelled.\n\n\n## Today / What’s Next\n\nIn preparation for a transition to .NET core, deprecation of the Windows\nshell runner and a desire to start testing our frontend (web), I started\nputting a CI script together using docker and the\nmcr.microsoft.com/dotnet/core/sdk:latest image. 
The .gitlab-ci.yml looks\nlike;\n\n\n```yml\n\nstages:  \n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.1\"\n\nbuild:\n stage: build\n tags:\n  - docker\n script:\n  - 'dotnet build'\n\ntest:\n stage: test\n tags:\n  - docker\n script:\n  - 'nohup dotnet run --project Web &'\n  - 'apt-get update'\n  - 'apt-get install -y unzip'\n  - 'wget https://chromedriver.storage.googleapis.com/83.0.4103.14/chromedriver_linux64.zip'\n  - 'unzip chromedriver_linux64.zip -d ~/'\n  - 'rm chromedriver_linux64.zip'\n  - 'mv -f ~/chromedriver /usr/local/bin/chromedriver'\n  - 'chown root:root /usr/local/bin/chromedriver'\n  - 'chmod 0755 /usr/local/bin/chromedriver'\n  - 'wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'\n  - 'sh -c ''echo \"deb https://dl.google.com/linux/chrome/deb/ stable main\" >> /etc/apt/sources.list.d/google.list'''\n  - 'apt-get update'\n  - 'apt-get install -y google-chrome-stable'\n  - 'dotnet test -l:trx Tests/Tests.csproj /p:CollectCoverage=true'\n coverage: '/Total\\s*\\|.*\\|\\s(\\d+\\.?\\d*)%\\s*\\|.*\\|/'\n```\n\n\nAnd the tests look something like;\n\n\n```c#\n    public class UiTests : IDisposable\n    {\n        private readonly Process _webServerProcess;\n        private readonly IWebDriver _driver;\n\n        [Fact]\n        public void ClickNavPrivacyPolicy()\n        {\n            _driver.Navigate()\n                .GoToUrl(\"http://localhost:5000/\");\n\n            var link = _driver.FindElement(By.LinkText(\"Privacy\"));\n            link.Click();\n\n            Assert.Equal(\"http://localhost:5000/Home/Privacy\", _driver.Url);\n        }\n\n        public UiTests()\n        {\n            ChromeOptions chromeOptions = new ChromeOptions();\n            chromeOptions.AddArguments(\"headless\", \"no-sandbox\");\n            _driver = new ChromeDriver(chromeOptions);\n\n            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) return;\n\n            
_webServerProcess = new Process\n            {\n                StartInfo = {\n                    WorkingDirectory = Path.Combine(System.AppDomain.CurrentDomain.BaseDirectory, \"..\", \"..\", \"..\", \"..\", \"Web\"),\n                    FileName = $\"dotnet.exe\",\n                    Arguments = \" run\",\n                    UseShellExecute = true,\n                }\n            };\n            _webServerProcess.Start();\n        }\n\n        private void KillWebServer()\n        {\n            if (_webServerProcess != null && !_webServerProcess.HasExited)\n            {\n                _webServerProcess.Kill();\n            }\n        }\n\n        public void Dispose()\n        {\n            _driver.Dispose();\n            KillWebServer();\n        }\n    }\n```\n\n\nYou can see some conditional code in there which allows Selenium tests to\nwork both locally on our development machines and remotely on our GitLab\nrunner. If you have a better way of achieving this, please leave a comment.\nI would love to chat and learn!\n\n\nI also want to start introducing some linting like we see in the GitLab\nproject to enforce rules around code formatting (spaces, carriage returns,\nindentation, etc.). I have started to look at JetBrains Resharper (R#)\ncommand-line but haven’t had enough time to implement it yet. Ideally. I\nwould like to start with just a rule or two and then slowly introduce more,\nbut it looks quite tricky to take this approach. Please let me know if\nyou’ve been able to achieve this!\n\n\nI would also like to lose our helpdesk and start using GitLab issues,\nservice desk, timelogs, etc. I am working on identifying the gaps and\nworking with the product managers to understand whether it is realistic to\nfill those gaps within the GitLab product. 
Alternatively, I will be looking\nto build some additional “bolt-ons” using webhooks and the API.\n\n\nWhile investigating gaps, I stumbled upon the [GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage) and I expect we'll use\nthis to automate various workflows. I managed to help close a few issues and\neven create a few additional features which would make it work for us by\n[contributing to the GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett).\n\n\nWe also added more labels (`needs code review` & `needs functional review`)\nfor our merge request approval process now. We can see where we are and what\nneeds to be done at a glance. We previously relied on an MR checklist that\nwe are deprecating.\n\n\n![Merge request\nchecklist](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/mr_checklist.png)\n\n\n![Merge requests with\nlabels](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/merge_requests_with_labels.png)\n\n\n## Contributing to GitLab \n\n\nI am very proud to have joined the GitLab Core Team. Thanks to everyone who\nhas held my hand and patiently assisted me with contributions. \n\n\nWith the release of Microsoft Windows Subsystem for Linux v2, I have gone\nback to running Windows on my laptop with GDK running in Ubuntu on WSL2.\nThis is working brilliantly for me at the moment (the way Visual Studio Code\nhandles things especially is really cool).\n\n\nI now have 95 [merged merge\nrequests!](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett)\nand have been helping several others get started contributing (getting GDK\nup and running etc). Once this crazy pandemic is over and we can start to\nsocialise again, I would like to try and start some sort of local\nmeetup/group.\n\n\nI would like to help make it easier to connect GitLab users. 
I have visions\nof a mechanism to search for others based:\n\n* the size of their user base \n\n* the languages they are using\n\n* the feature they are using\n\n\nAt present, we have several tools (Gitter, Issues, Forum etc) but there is a\nstrong reliance on being engaged and stumbling on questions/support\nrequests. I suspect many of us would be happy to have other users reach out\ndirectly.\n\n\nIf you need any more information around:\n\n* getting your development environment/tools setup on Windows 10\n\n* getting CI working with .NET and SQL Server projects\n\n* building customisations using GitLab webhooks and API\n\n\n...or would like to see a demo of anything discussed above, I would be happy\nto oblige!\n\n\nI would love to connect with others who are either looking to, or already\nusing GitLab for:\n\n* .NET projects\n\n* customer helpdesk \n\n* customer billing (using timelogs)\n\n\nThanks for reading! Here's a picture of me and the family repping with our\nGitLab merch!\n\n\n![The tickett family repping\nGitLab](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/landing_page.png)\n",[9,268,763,875,827,721,873],{"slug":3987,"featured":6,"template":700},"lee-tickett-my-gitlab-journey","content:en-us:blog:lee-tickett-my-gitlab-journey.yml","Lee Tickett My Gitlab Journey","en-us/blog/lee-tickett-my-gitlab-journey.yml","en-us/blog/lee-tickett-my-gitlab-journey",{"_path":3993,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3994,"content":4000,"config":4005,"_id":4007,"_type":14,"title":4008,"_source":16,"_file":4009,"_stem":4010,"_extension":19},"/en-us/blog/look-back-on-11-11-cicd",{"title":3995,"description":3996,"ogTitle":3995,"ogDescription":3996,"noIndex":6,"ogImage":3997,"ogUrl":3998,"ogSiteName":685,"ogType":686,"canonicalUrls":3998,"schema":3999},"Looking back on the 11.x releases for GitLab CI/CD","With GitLab 12.0 coming soon, it's a great time to reflect on all the features we've launched since 
11.0.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666857/Blog/Hero%20Images/photo-cicdlookback.jpg","https://about.gitlab.com/blog/look-back-on-11-11-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Looking back on the 11.x releases for GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-06-19\",\n      }",{"title":3995,"description":3996,"authors":4001,"heroImage":3997,"date":4002,"body":4003,"category":718,"tags":4004},[803],"2019-06-19","\nGitLab releases each month, so if you aren't paying close attention it can be easy to\nlose track of all the great features that are coming out. With an eye towards [CI/CD](/solutions/continuous-integration/)\nin particular, I'd like to take you through some of the highlights in each of our 11.x releases,\neach of which contributed to our strategy around cloud native CI/CD that has\nsecurity and smarts built right in, supports code reusability and live troubleshooting,\nand in general enables your team to make progress towards your goal of better, more\nreliable software delivery.\n\n![Release Badges](https://about.gitlab.com/images/blogimages/11x_release_logos.png){: .shadow.medium.center}\n\nFor those who don't know me, I'm the director of product for CI/CD and I've spent\nmy career (going all the way back to doing build automation of Windows 98 at my\nfirst corporate job) out of doing build and release automation and process. I love\nthis stuff, and my career move from building CI/CD implementations to building\nCI/CD tools for folks just like me has been one of the most rewarding things I've\ndone in my life. I hope that experience and passion comes through in the features\nwe've delivered – either way, I'd love to chat with you if you're a user of GitLab\nCI/CD. 
DM me on [Twitter](https://twitter.com/j4yav) or contact me via my [GitLab profile](https://gitlab.com/jyavorska) if you'd like to chat.\n\nAnyway, without further ado let's dive into the first 11.x release!\n\n## [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/)\n\n### Auto DevOps Generally Available\n\nWe kicked off the 11.0 series in June 2018 by launching [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/).\nBeyond making it easy to host and collaborate on public and private repositories,\nGitLab also simplifies the rest of the process by offering the whole delivery toolchain,\nbuilt in and automated: Simply commit your code and Auto DevOps can do the rest.\nAuto DevOps is a pre-built, fully featured CI/CD pipeline that takes the best of\nGitLab CI/CD features, adds a lot of smarts around auto-detecting what's in your\nproject, and automates the entire delivery process to your Kubernetes cluster.\n\nCheck out our [quick-start guide](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\nif you haven't had a chance to play with it yet – you might be surprised what it's\ncapable of out of the box.\n\n![Auto DevOps](https://about.gitlab.com/images/11_0/auto-devops.png){: .shadow.medium.center}\n\n### Job logs in the Web IDE\n\nTying operational deployments/execution together with development is also a priority\nfor GitLab. 
In 11.0 we made the CI status of the current commit available in the status\nbar of the Web IDE, and made it possible to view the [status and the logs for each job on the right](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs).\nThis made it easy to fix a merge request with CI failures by opening the failed job\nright alongside your code.\n\n![Web IDE trace](https://about.gitlab.com/images/11_0/web_ide_ci_trace.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [CI/CD pipeline jobs integrated with the Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/#view-ci-job-logs)\n- [Variable-defined deployment policies for Canary deployments](https://docs.gitlab.com/ee/topics/autodevops/#deploy-policy-for-canary-environments)\n- [Specify deployment strategy from Auto DevOps settings](https://docs.gitlab.com/ee/topics/autodevops/#auto-deploy)\n\n---\n\n## [GitLab 11.1](/releases/2018/07/22/gitlab-11-1-released/)\n\n### Security reports in pipeline view\n\nSecurity was another important area of focus for us throughout the 11.x series. We\nalready had security reports in the MR before this release, but here we also\nadded status for branches so this information can be acted upon even earlier.\nGitLab 11.1 (July 2018) completed the [set of security reports shown in the pipeline view](https://docs.gitlab.com/ee/user/project/merge_requests/#security-reports),\nadding both Container Scanning and DAST. 
From there you could now simply review\nthe Reports tab to access all security information and take action.\n\n![Security Reports](https://about.gitlab.com/images/11_1/security_reports.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Redesign of the merge request and pipeline info sections](https://docs.gitlab.com/ee/user/project/merge_requests/)\n- [Improved Kubernetes cluster page design](https://docs.gitlab.com/ee/user/project/clusters/)\n\n---\n\n## [GitLab 11.2](/releases/2018/08/22/gitlab-11-2-released/)\n\n### Custom templates at the instance level\n\nIn 11.2 (August 2018) we also introduced [custom templates at the instance level](https://docs.gitlab.com/ee/administration/custom_project_templates.html),\nmaking it easy for organizations to set up a basic template for how they want\ntheir CI/CD pipelines to run. Development teams can grab a copy of the template\nand go, confident their following their organizational processes. Our enterprise\ncustomers are very important to us, and this feature came directly from the great\nfeedback we get from our customers.\n\n![Project Templates](https://about.gitlab.com/images/11_2/project-templates-instance.png){: .shadow.medium.center}\n\n### Kaniko for Docker Builds\n\nHistorically, building Docker images within a containerized environment had\nrequired compromises, using techniques like docker-in-docker on privileged\ncontainers. These solutions were often insecure and slow. In this release we\nmade the Runner compatible with [Kaniko](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html),\na new tool developed by Google which is able to securely build an image within\nan unprivileged container. 
Cloud-first build technology is so important for the\njourney we want to take with our users, and supporting these kinds of foundational\ntechnologies that make your life easier are so nice to deliver.\n\n![Kaniko](https://about.gitlab.com/images/11_2/kaniko.png){: .shadow.medium.center}\n\n### JUnit test results in merge requests\n\nFinally, testing will always be an important part of any CI/CD pipeline. With the 11.2 release,\nwe made it possible to [see JUnit test results directly](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html)\nright from the CI view in the merge request widget, as part of our ongoing efforts\nto invest in full-spectrum integrated testing within GitLab.\n\n![JUnit Results](https://about.gitlab.com/images/feature_page/screenshots/junit-test-summaries-MR-widget.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [GitLab Runner in cloud native Helm Chart](https://docs.gitlab.com/charts/)\n- [Built-in project templates switched to use Dockerfiles](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project)\n- [Manually stop an environment](https://docs.gitlab.com/ee/ci/environments/index.html#stopping-an-environment)\n\n---\n\n## [GitLab 11.3](/releases/2018/09/22/gitlab-11-3-released/)\n\n### Built-in Maven package repository\n\nFor any development organization, having an easy and secure way to manage\ndependencies is critical. Package management tools, such as Maven for Java\ndevelopers, provide a standardized way to share and version control these\nlibraries across projects. 
In GitLab 11.3 (September 2018), we opened up [Maven repositories built directly into GitLab](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html).\nJava developers were now easily able to publish their packaged libraries to\ntheir project’s Maven repository: Just share a simple XML snippet with\nother teams looking to utilize that library, and Maven and GitLab will take care\nof the rest.\n\n![Maven Repo](https://about.gitlab.com/images/11_3/maven.png){: .shadow.medium.center}\n\n### Interactive Web Terminals\n\nCI/CD jobs are executed in the runner as part of pipelines, but this execution wasn't interactive.\nWhen they failed, it wasn't always easy to dig into details to spot the source of the problem.\n[Interactive web terminals](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\nbrought the capability to connect to a running or completed job and manually enter\ncommands to understand what’s happening in the system, and helped us move the story\nforward on empowering teams to deliver code, troubleshoot, and solve issues directly.\n\n![Web Terminal](https://about.gitlab.com/images/11_3/verify-webterm.png){: .shadow.medium.center}\n\n### Better includes with `extends` keyword\n\nReusing CI/CD code is a great way to help ensure consistency in software delivery,\nand also minimizes the amount of per-job scripting that’s needed to write and\nmaintain. 
As of 11.11, we began offering a powerful alternative approach\nfor code reuse in templates using [YAML `extends` keywords](https://docs.gitlab.com/ee/ci/yaml/#extends),\nexpanding upon our vision for reusability and compliance in the enterprise.\n\n![Extends](https://about.gitlab.com/images/11_3/verify-includes.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)\n- [Auto DevOps enabled by default](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Custom file templates for self-managed instances](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html)\n\n---\n\n## [GitLab 11.4](/releases/2018/10/22/gitlab-11-4-released/)\n\n### Feature Flags\n\nFeature Flags are a no-brainer to make software deliver easier, so you knew we'd eventually\nwant to include them in the GitLab single application. With the 11.4 release (October 2018) we delivered on\nthis promise by adding [Feature Flags](https://docs.gitlab.com/ee/operations/feature_flags.html),\nhelping teams to achieve continuous delivery by offering better options for incrementally\nrolling out changes and separating feature delivery from customer launch.\n\n![Feature Flags](https://about.gitlab.com/images/11_4/feature_flags.png){: .shadow.medium.center}\n\n### `only/except` rules for changes to files\n\nA very popular requested feature, in 11.4 we added the ability within the\n`.gitlab-ci.yml` to [use `only`/`except` rules for jobs](https://docs.gitlab.com/ee/ci/yaml/#only--except)\nbased on when modifications occur to a specific file or path (or glob). 
This allowed\nfor even more smarts in the pipeline, especially for monorepo/microservice-type\nuse cases, where the pipeline behavior can be optimized based on the changed files\nin the repository.\n\n![Only Except Changes](https://about.gitlab.com/images/11_4/verify-onlyexceptchanges.png){: .shadow.medium.center}\n\n### Timed incremental rollouts\n\nTeams already had the ability within Auto DevOps to set up incremental rollouts,\nbut with this release we added an option to also set up [timed incremental rollouts](https://docs.gitlab.com/ee/topics/autodevops/#timed-incremental-rollout-to-production)\nwhere the rollout will automatically continue forward on a timed cadence, making\nsure there is no error before continuing. This helped us push our vision for safe,\ncontinous deployment forward by providing teams with a new tool to have control over\ntheir code rollouts.\n\n![Timed Incremental Rollouts](https://about.gitlab.com/images/11_4/timed_incremental_rollouts.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Moving `includes` from Starter to Core](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Auto DevOps support for RBAC](https://docs.gitlab.com/ee/topics/autodevops/)\n- [Filter admin runners view by type/state](https://docs.gitlab.com/ee/ci/runners/)\n- [Support for interactive web terminals with Docker executor](https://docs.gitlab.com/ee/ci/interactive_web_terminal/)\n- [Delayed jobs for pipelines](https://docs.gitlab.com/ee/ci/yaml/#whendelayed)\n\n---\n\n## [GitLab 11.5](/releases/2018/11/22/gitlab-11-5-released/)\n\n### Access control for Pages\n\nWith the 11.5 release (November 2018) we delivered a fantastic community-contributed feature which enabled\naccess control for Pages. 
From now on, instead of only supporting use cases where the\ncontent associated with the product is public, you could use Pages to build and\npublish protected content that should [only be accessible by project members](https://docs.gitlab.com/ee/user/project/pages/introduction.html#gitlab-pages-access-control).\nOperational documentation, internal secrets, or even just private planning or\nother information can now be confidently published via your pipelines in an\neasy-to-access way, with confidence that only the right people are able to see it.\n\n![Access Control Pages](https://about.gitlab.com/images/11_5/access-control-pages.png){: .shadow.medium.center}\n\n### Deploy Knative to your Kubernetes cluster\n\nBuilding [serverless applications](/topics/serverless/) enables teams to focus their time on making a\ngreat product and eliminates the need of provisioning, managing, and operating\nservers. Starting in GitLab 11.5, we enabled [deploying Knative to your existing Kubernetes cluster](https://docs.gitlab.com/ee/update/removals.html)\nwith a single click using the GitLab Kubernetes integration. Knative is a\nKubernetes-based platform to build, deploy, and manage modern serverless workloads.\nTasks that were once difficult, such as source-to-container builds, routing and\nmanaging traffic, and scaling-to-zero, now work effortlessly out of the box.\n\n![KNative](https://about.gitlab.com/images/11_5/knative.png){: .shadow.medium.center}\n\n### Parallel attribute for faster pipelines\n\nThe speed to delivery in a CI/CD environment can oftentimes be limited by the time it takes to complete the various tests in order to ensure the code is able to be shipped. 
With the `parallel` keyword in GitLab CI/CD, teams can quickly and easily parallelize these tests – accelerating the testing process and overall time to delivery.\n\n![Parallel](https://about.gitlab.com/images/11_5/parallel-keyword.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Review Apps can now link directly to changed pages](https://docs.gitlab.com/ee/ci/environments/index.html#going-from-source-files-to-public-pages)\n- [New CI/CD syntax for security, quality, and performance report types](https://docs.gitlab.com/ee/ci/yaml/#artifactsreports)\n- [Additional information about deployments in merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/index.html#pipeline-status-in-merge-requests)\n\n---\n\n## [GitLab 11.6](/releases/2018/12/22/gitlab-11-6-released/)\n\n### GitLab Serverless\n\nBuilding on the Knative integration introduced in the previous month, 11.6's new, more\ncomprehensive [Serverless](https://docs.gitlab.com/ee/update/removals.html)\ncapability enabled users to easily define functions in their repository and have\nthem served and managed by Knative. Cloud native is such an important part of our\nroadmap, and it was really exciting to launch this feature while I was at KubeCon\nno less.\n\nBy simply defining your function data in the repo’s `serverless.yml` file and\nusing a `.gitlab-ci.yml` template, each function will be deployed to your cluster,\nwith Knative taking care of scaling your function based on request volume. This\nenables application developers to iterate quickly without having to worry about\nprovisioning or managing infrastructure.\n\n![Serverless](https://about.gitlab.com/images/11_6/serverless.png){: .shadow.medium.center}\n\n### Run pipeline jobs for merge requests\n\nRunning a given job only when dealing with a merge request was made much easier in 11.6. 
Using the\n`merge_requests` value with `only/except` keywords will allow you to configure jobs\nto run [only or except when in the context of a merge request](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\nThis allows finer control over pipeline behavior, and also provides access to new\nenvironment variables indicating the target branch and merge request ID to be used\nfor additional automated behaviors.\n\n![Merge Request Pipelines](https://about.gitlab.com/images/11_6/verify-mergerequestpipelines.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Kubernetes clusters for groups](https://docs.gitlab.com/ee/user/group/clusters/)\n- [Pipelines are now deletable via API](https://docs.gitlab.com/ee/api/pipelines.html#delete-a-pipeline)\n- [Trigger variables are now hidden in UI by default](https://docs.gitlab.com/ee/ci/triggers/)\n\n---\n\n## [GitLab 11.7](/releases/2019/01/22/gitlab-11-7-released/)\n\n### Releases page\n\nThe 11.7 release (January 2019) added the ability to [create releases in GitLab](https://docs.gitlab.com/ee/user/project/releases/index.html)\nand view them on a summary page. Releases are a snapshot in time of the source,\nlinks, and other metadata or artifacts associated with a released version of your\ncode, and helps users of your project to easily discover the latest releases\nof your software.\n\nThis is a feature that was, as a career release manager, near and dear to my heart.\nI have so many plans around [Release Orchestration](/direction/release/release_orchestration/)\nthat build on this feature as a foundation. Being able to tie a milestone to\na release, a feature coming very soon, will open the door to tying together all\nkinds of interesting things happening in GitLab to a release. 
This isn't my forward-looking\nblog post so I won't go too far here, but I'll just say I can't wait to\ngo on that journey to build something really unique and powerful together with our users.\n\n![Releases Page](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Expand upstream/downstream pipelines across projects\n\nWith 11.7 it became possible to [expand upstream or downstream cross-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/index.html#visualize-pipelines)\nright from the pipeline view, giving you visibility into your end-to-end pipelines,\nno matter in which project they start or finish. It's one pattern we've been seeing\nmore and more of in GitLab, and we're adding more features to support. The reality of\ncontinuous delivery is complex orchestration across projects and even groups, so\nthis is a feature that was nice to get out the door to help make this easier.\n\n![Cross-Project Pipelines](https://about.gitlab.com/images/11_7/release-pipeline_expansion.png){: .shadow.medium.center}\n\n### NPM package repository\n\nIn January we also started offering [NPM registries](https://docs.gitlab.com/ee/user/packages/npm_registry/index.html)\nbuilt directly into GitLab. From this point teams can share a simple package-naming\nconvention to utilize that library in any Node.js project, and NPM and GitLab will\ndo the rest – all from a single, easy-to-use interface. 
Yet another step on our path\nto enable all kinds of repositories, built right into GitLab when you need them.\n\n![NPM Packages](https://about.gitlab.com/images/11_7/npm_package_view.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Ability to configure Kubernetes app secrets as variables in Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#application-secret-variables)\n- [API support for Kubernetes integration](https://docs.gitlab.com/ee/api/project_clusters.html)\n- [Short commit SHA available as environment variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\n- [Authorization support for fetching includes](https://docs.gitlab.com/ee/ci/yaml/#include)\n- [Skip CI builds during git push with `skip_ci` keyword](https://docs.gitlab.com/ee/ci/pipelines/#skip-a-pipeline)\n\n---\n\n## [GitLab 11.8](/releases/2019/02/22/gitlab-11-8-released/)\n\n### `trigger:` keyword for pipelines\n\nEven as of GitLab 9.3 you were able to create multi-project pipelines by triggering\na downstream pipeline via a GitLab API call in your job. In GitLab 11.8 (February 2019), we added\nfirst-class support for triggering these downstream pipelines with the [`trigger:`](https://docs.gitlab.com/ee/ci/yaml/#trigger)\nkeyword, instead of requiring teams to make an API call to trigger the downstream\npipeline. A bit more for those cross-project use cases that makes everything just\na little bit nicer to use.\n\n![Trigger](https://about.gitlab.com/images/11_8/multi_project_pipeline_graph.png){: .shadow.medium.center}\n\n### Pages support for subgroups\n\nPages was updated in 11.8 to [work with subgroups in GitLab](https://docs.gitlab.com/ee/administration/pages/),\ngiving you the ability to create Pages sites at that level as well. 
Sites set up in this\nway will have a URL in the format of `toplevel-group.gitlab.io/subgroup/project`,\nmaking them very easy to find.\n\n![Pages for SubGroups](https://about.gitlab.com/images/11_8/release-pages-subgroups.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Several new templates for getting started quickly with GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/#getting-started)\n- [Auto DevOps support for environment-specific custom domain](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n- [Feature Flags was improved by making them environment-aware](https://docs.gitlab.com/ee/operations/feature_flags.html#define-environment-specs)\n- [CI_PAGES and CI_PAGES_URL added as helpful variables accessible to Pages pipelines](https://docs.gitlab.com/ee/user/project/pages/)\n- [.html extensions are now automatically resolved for Pages sites](https://docs.gitlab.com/ee/user/project/pages/)\n- [Tolerations were added to the Kubernetes executor](https://docs.gitlab.com/runner/executors/kubernetes.html#the-keywords)\n- [A new cleanup procedure for the Container Registry](https://docs.gitlab.com/ee/api/container_registry.html#delete-a-repository-tag)\n- [Force redeploy when Auto DevOps secrets are updated](https://docs.gitlab.com/ee/topics/autodevops/#environment-variables)\n\n---\n\n## [GitLab 11.9](/releases/2019/03/22/gitlab-11-9-released/)\n\n### Feature Flag auditability\n\nWith the 11.9 release (March 2019), operations like adding, removing, or changing Feature Flags\nare now [recorded in the GitLab audit log](https://docs.gitlab.com/ee/administration/audit_events.html),\ngiving you visibility into what is changing and when. If you’re having an incident\nand need to see what changed recently, or just need to look back as an auditor on\nhow your feature flags have been modified, this is now very easy to do. 
We have\nbig plans for Feature Flags, and also compliance built right into your pipelines.\nIt was great to knock out a two-for-one with this one.\n\n![Feature Flag audit events](https://about.gitlab.com/images/11_9/release-ffaudit.png){: .shadow.medium.center}\n\n### Security templates for pipelines\n\nGitLab security features evolve very fast, and they always need to be up to\ndate to be effective and protect your code. We know that changing the job\ndefinition is difficult if you have to manage multiple projects. As of this release we\ninclude bundled security templates [directly into your configuration](https://docs.gitlab.com/ee/user/application_security/sast/#configuring-sast),\nand have them updated with your system every time you upgrade to a new version of\nGitLab, without any change to any pipeline configuration required. Security plus\nreusability, a great combination.\n\n![Security Templates](https://about.gitlab.com/images/11_9/templates.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Project templates for .NET, Go, iOS, and Pages](https://docs.gitlab.com/ee/user/project/working_with_projects.html#built-in-templates)\n- [Run specific jobs on merge requests only when files change](https://docs.gitlab.com/ee/ci/jobs/job_control.html#use-onlychanges-with-merge-request-pipelines)\n- [Auto DevOps build jobs for tags](https://docs.gitlab.com/ee/topics/autodevops/#auto-build)\n\n---\n\n## [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/)\n\n### Pipeline dashboard\n\nIn 11.10 (April 2019) we added [pipeline status information to the Operations Dashboard](https://docs.gitlab.com/ee/user/operations_dashboard/).\nThis helps teams view the pipeline health of all the projects that they care about,\nall together in a single interface. Yet another step towards making pipelines across\nyour instance easy to understand and follow, this one was built in real-time coordination\nwith a customer, which is always a nice way to get something done. 
You get to build\nsomething that solves a real problem and collaborate directly with the folks who\nneed it.\n\n![Pipeline Dashboard](https://about.gitlab.com/images/11_10/cross-project-pipelines-dashboard.gif){: .shadow.medium.center}\n\n### Pipelines on merge results\n\nWhen working in a feature branch, it’s normal to have it diverge over\ntime from the target branch if you aren’t rebasing frequently. This can result\nin a situation where both the source and target branch’s pipelines are green and\nthere are no merge conflicts, but the combined output will result in a failed\npipeline due to an incompatibility between the changes.\n\nWith 11.10 it became possible for a pipeline to automatically create a new ref that\ncontains the combined merge result of the source and target branch, then\n[run the pipeline against that ref](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html)\n(what we call an `attached` state). In this way, GitLab can help teams keep their\nmaster branch green even when they have many teams merging into the release branch.\n\nTools and techniques built right into GitLab for keeping master green was a big\nfocus in the last few releases of 11.x, and will remain so for 12.x as well. Look\nfor [merge trains](https://gitlab.com/gitlab-org/gitlab-ee/issues/9186) to be built\non top of this foundation, and some really cool enhancements around sequencing and\nparallelization of them.\n\n![Merge Ref Pipeline](https://about.gitlab.com/images/11_10/merge_request_pipeline.png){: .shadow.medium.center}\n\n### Composable Auto DevOps\n\nAuto DevOps enables teams to adopt modern DevOps practices with little to no effort.\nStarting in GitLab 11.10 each job of Auto DevOps was made available as an\nindependent template. 
Using the includes feature of GitLab CI, users can [choose to bring in\nonly certain stages of Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/#using-components-of-auto-devops) while continuing to use their own custom\n`gitlab-ci.yml` for the rest. This helps teams to use just the desired jobs, while\ntaking advantage of any updates made upstream.\n\n![Composable Auto DevOps](https://about.gitlab.com/images/11_10/composable-auto-devops.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [More thorough Container Registry cleanup](https://docs.gitlab.com/omnibus/maintenance/#removing-unused-layers-not-referenced-by-manifests)\n- [Ability to purchase CI add-on runner minutes](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Change the cloning path for pipelines](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#custom-build-directories)\n- [Simple masking of protected variables in logs](https://docs.gitlab.com/ee/ci/variables/#masked-variables)\n- [Enable/disable Auto DevOps at the group level](https://docs.gitlab.com/ee/topics/autodevops/#enablingdisabling-auto-devops-at-the-group-level)\n- [Group-level runners for group-level clusters](https://docs.gitlab.com/ee/user/group/clusters/#installing-applications)\n- [Control over `git clean` flags in pipeline jobs](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#git-clean-flags)\n\n---\n\n## [GitLab 11.11](/releases/2019/05/22/gitlab-11-11-released/)\n\n### Windows Container Executor\n\nIn GitLab 11.11 (May 2019) we were very pleased to add a new executor to the GitLab Runner\nfor using [Docker containers on Windows](https://docs.gitlab.com/runner/executors/docker.html#using-windows-containers).\nPreviously, using the shell executor to orchestrate Docker commands was the primary\napproach for Windows, but with this update you are now able to use Docker\ncontainers on Windows directly, in much the same 
way as if they were on Linux\nhosts. This opened up the door for more advanced kinds of pipeline orchestration\nand management for our users of Microsoft platforms.\n\nAlso included with this update was improved support for PowerShell throughout GitLab\nCI/CD, as well as new helper images for various versions of Windows containers.\n\n![Windows Executor](https://about.gitlab.com/images/11_11/windows-container.png){: .shadow.medium.center}\n\n### Caching proxy for Container Registry\n\nLots of teams are using containers as part of their build pipelines, and our new\n[caching proxy](https://docs.gitlab.com/ee/user/packages/dependency_proxy/index.html) for\nfrequently used upstream images/packages introduced a great way to speed them up.\nBy keeping a copy of needed layers locally using the new caching proxy, you can\neasily improve execution performance for the commonly used images in your environment.\n\n![Dependency Proxy](https://about.gitlab.com/images/11_11/dependency-proxy-mvc.png){: .shadow.medium.center}\n\n### Chat notifications for deployments\n\nIn 11.11 deployment events were available to be [automatically shared in your team’s channel](https://docs.gitlab.com/ee/user/project/integrations/)\nthrough our Slack and Mattermost chat integrations, helping bring visibility to\nthese important activities that your teams need to be aware of.\n\n![Notifications](https://about.gitlab.com/images/11_11/release-slack-notification.png){: .shadow.medium.center}\n\n### Guest Access for Releases\n\nIt also became possible in this release for [guest users of your projects to view releases](https://docs.gitlab.com/ee/user/permissions.html#releases-permissions)\nthat you have published on the Releases page. 
They will be able to download your\npublished artifacts, but are prevented from downloading the source code or seeing\nrepository information such as tags and commits.\n\n![Guest Releases](https://about.gitlab.com/images/11_7/release-releases_page.png){: .shadow.medium.center}\n\n### Other highlights\n\n- [Add-on runner minutes extended to free plans](https://docs.gitlab.com/ee/administration/settings/continuous_integration.html#extra-shared-runners-pipeline-minutes-quota)\n- [Access deployment details through environments API](https://docs.gitlab.com/ee/api/environments.html#get-a-specific-environment)\n- [Create a file directly from environment variable](https://docs.gitlab.com/ee/ci/variables/#variable-types)\n- [Run all manual jobs for a stage in one click](https://docs.gitlab.com/ee/ci/pipelines/index.html#add-manual-interaction-to-your-pipeline)\n\n---\n\n## In conclusion\n\nPhew... that was a lot of great features, and the team here at GitLab is really proud of\nwhat we delivered with this series of GitLab releases. I hope you found something\nthat you can take advantage of in your own CI/CD process. If you're interested in\nseeing where we're heading next, head over to our [CI/CD strategy page](/direction/ops/)\nand check out what's coming. Also, be sure to check out our 12.0 release post coming out on the 22nd of this month.\n\nOne of the things you may have noticed is that we frequently add new iterations\non our features, even month to month. We have a lot more iterations planned, both\nfor new and existing features, but what would you like to see in the next\nversion of your favorite feature? 
We'd love to hear – let us know in the\ncomments below.\n\nPhoto by [Zoltan Tasi](https://unsplash.com/photos/O_mBXldZ0hc?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,9,695],{"slug":4006,"featured":6,"template":700},"look-back-on-11-11-cicd","content:en-us:blog:look-back-on-11-11-cicd.yml","Look Back On 11 11 Cicd","en-us/blog/look-back-on-11-11-cicd.yml","en-us/blog/look-back-on-11-11-cicd",{"_path":4012,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4013,"content":4019,"config":4024,"_id":4026,"_type":14,"title":4027,"_source":16,"_file":4028,"_stem":4029,"_extension":19},"/en-us/blog/making-builds-faster-autoscaling-runners",{"title":4014,"description":4015,"ogTitle":4014,"ogDescription":4015,"noIndex":6,"ogImage":4016,"ogUrl":4017,"ogSiteName":685,"ogType":686,"canonicalUrls":4017,"schema":4018},"How to make builds faster","How GitLab uses autoscaling to reduce build times and make developers happy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673173/Blog/Hero%20Images/autoscaling-balance.jpg","https://about.gitlab.com/blog/making-builds-faster-autoscaling-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make builds faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-08-21\",\n      }",{"title":4014,"description":4015,"authors":4020,"heroImage":4016,"date":4021,"body":4022,"category":1040,"tags":4023},[715],"2019-08-21","\nPicture this: It’s 5:30 pm on a Friday and a project manager has an urgent request. A\nbug is affecting a group of customers and it needs to be fixed ASAP. You find the discrepancy\nand, _phew_, it looks like it’s going to be a relatively easy fix. 
You make the update and\nstart the CI pipeline… and then you wait… and wait. Two hours later, you’re still waiting. What was\nsupposed to be a quick fix has turned into another long night sitting in a queue.\n\n[The team at Ticketmaster](/blog/continuous-integration-ticketmaster/) certainly felt the\npain with their Jenkins pipelines, and many [DevOps](/topics/devops/) teams are all too familiar with sluggish CI.\n\nSlow builds hinder development speed. Plus – they’re annoying. It’s just one more thing developers\nhave to deal with in order to do their jobs. Organizations might dedicate more servers to process\nthese builds in an effort to solve the problem, but often that creates more problems. More servers\nmean higher cloud and computing costs. When it comes to long builds, many developers have\nresigned themselves to just “grin and bear it.”\n\n## Making builds _faster_\n\n[Continuous integration](/solutions/continuous-integration/) allows you to run a number of tasks as you\nprepare to deploy your software, like building a software package or running tests. These tasks\nneed to be run by something. At GitLab we call these task enablers runners, though other [CI tools](/solutions/continuous-integration/) call them\nagents. Runners are an application that processes builds: If all of these runners are in use, work\nis queued until one becomes available. Let's say your peak usage is 100 jobs, but your average\nusage is around 25 jobs. You have to decide how many servers to provision. If you go with the\naverage, you will have to wait during peak usage times. So why not just add more runners? Some\nservices actually charge for each of these virtual machines, and if you’re not using them all\nthe time, those costs can add up. If you're on a cloud infrastructure, you're paying for that\nserver time – even when it's not doing anything.\n\nFor ops teams, it’s been a never-ending balancing act of having the right amount of runners\nfor the right amount of work. 
But tasks don’t happen in a vacuum – every team has slow times\nand busier times that are unpredictable.\n\nNobody likes waiting. With this universal truth in mind, we introduced autoscaling to GitLab Runners.\n\n## What are autoscaling runners?\n\nAutoscaling gives teams the ability to utilize resources in a more elastic and dynamic way. What\nthis means is that our runners can be configured so that machines are created _on demand_.\nThose machines, after the job is finished, can wait to run the next jobs or be removed automatically.\nYou can even specify the `IdleTime` of a server before it shuts off. Once runners are set up to\nautoscale, your infrastructure contains only enough capacity to handle the load.\n\nAutoscaling runners ensure builds can be processed more efficiently and you aren’t paying for\nmore machines than you need. Developers can focus on their code instead of worrying about\ntheir infrastructure environment, and ops teams no longer have to moonlight as soothsayers.\n\nThe only thing you need to take advantage of autoscaling is one GitLab instance and\none [GitLab Runner](https://docs.gitlab.com/runner#features) that can be installed for free.\nOur runner is written in Go and can run on any platform where you can build Go binaries\nincluding Linux, macOS, Windows, FreeBSD, and Docker.\n\nSee how the team at [Substrakt Health](https://substrakthealth.com/) set up an autoscaling\ncluster of GitLab CI/CD runners using Docker-Machine and AWS – and saved 90% on EC2 costs in the process.\n\n[Read their story.](/blog/autoscale-ci-runners/)\n{: .alert .alert-gitlab-purple .text-center}\n\nSpeed and efficiency are important cornerstones of effective DevOps, so waiting for builds has\nalways felt like a step backward. As everyone strives to deploy more software, it seems only right\nthat your architecture be up for the task. 
Autoscaling runners let DevOps teams focus on what\nthey do best: Deploying better, faster software (yes, even on a Friday).\n\nPhoto by [Austin Neill](https://unsplash.com/@arstyy?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,695,721],{"slug":4025,"featured":6,"template":700},"making-builds-faster-autoscaling-runners","content:en-us:blog:making-builds-faster-autoscaling-runners.yml","Making Builds Faster Autoscaling Runners","en-us/blog/making-builds-faster-autoscaling-runners.yml","en-us/blog/making-builds-faster-autoscaling-runners",{"_path":4031,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4032,"content":4038,"config":4044,"_id":4046,"_type":14,"title":4047,"_source":16,"_file":4048,"_stem":4049,"_extension":19},"/en-us/blog/making-ci-easier-with-gitlab",{"title":4033,"description":4034,"ogTitle":4033,"ogDescription":4034,"noIndex":6,"ogImage":4035,"ogUrl":4036,"ogSiteName":685,"ogType":686,"canonicalUrls":4036,"schema":4037},"Making CI/CD easier with GitLab","The team at Trek10 tries to consider the need for automation and repeatability with everything they do. 
One team member gives a crash course in GitLab CI/CD and explains how they use it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680423/Blog/Hero%20Images/making-ci-easier-with-gitlab.jpg","https://about.gitlab.com/blog/making-ci-easier-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Making CI/CD easier with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rob Ribeiro\"}],\n        \"datePublished\": \"2017-07-13\",\n      }",{"title":4033,"description":4034,"authors":4039,"heroImage":4035,"date":4041,"body":4042,"category":718,"tags":4043},[4040],"Rob Ribeiro","2017-07-13","At [Trek10](https://www.trek10.com/), we always try to consider the need for\nautomation and repeatability with everything that we do. That’s why we focus\non using tools like CloudFormation, [Serverless](/topics/serverless/), and\nCI/CD, as well as building other tools. Recently, I was tasked with doing\nvarious maintenance tasks on a number of internal tools/projects. Some\nneeded upgrades from Node.js 0.10, some needed code fixes, and most needed\nCI/CD. Today, we’re just going to focus on the CI/CD part.\n\n\n\u003C!-- more -->\n\n\nIn spite of my past experience with Jenkins and TeamCity and our team’s\nexperience with AWS (CodePipeline/CodeDeploy), I chose [GitLab\nCI/CD](/topics/ci-cd/) to standardize these projects. The biggest reason for\nthis choice is history. As a project evolves, its CI/CD configuration may\nchange. If you ever need to go back in time, you may have difficulty\ndeploying again. Since GitLab CI/CD is based on a `.gitlab-ci.yml` config\nfile that is committed with the code, as long as a commit built and deployed\nthen, it stands a pretty good chance of building and deploying now. 
Being\nable to tweak CI/CD without leaving my editor was an additional bonus.\n\n\n### Crash course in GitLab CI/CD\n\n\nGitLab CI/CD relies on having a `.gitlab-ci.yml` file in the root of your\nrepo. CI/CD for each commit is run against the `.gitlab-ci.yml` that is\ncurrent for that commit. The fundamental unit of CI/CD for GitLab is a\n“job”. A job is a construct that runs a bash script against a commit in a\nparticular context. You might have one job to run tests, other jobs to build\nfor staging or production, and other jobs to deploy to particular\nenvironments. In the config file, jobs are represented by top level maps\n(aka “objects”) that are not otherwise “reserved” GitLab CI/CD maps.\nExamples of reserved top level maps: `image` (Docker image in which your\njobs run), `services` (other Docker images that need to run while your jobs\nrun), `before_script` (runs before every `script`), `after_script` (runs\nafter every `script`), `stages` (redefines the stage names and order),\n`variables` (variables available to all jobs), and `cache` (controls what is\ncached between CI/CD runs; good for stuff from your package manager).\n\n\nEvery job must belong to a stage (if left out, `test` is the default).\nStages are run in a sequence, and all of the jobs in a stage run with max\nparallelism available. The default stage sequence is: `build`, `test`,\n`deploy`. Each job also has `before_script`, `after_script`, `variables`,\nand `cache`. Defining these at a job level will override the top-level\nconfiguration. The most important of these is `variables`, because your\nvariables are what make the production deploy job’s context different from\nthe staging deploy job’s context. `variables` is just a map with a bunch of\nkey value pairs. Variables are consumed with a syntax similar to bash:\n`${myVar}`. 
There are some limitations that you should know:\n\n\n* Variables do not support bash variable expansions, substitutions,\ndefaults, etc.\n\n* Variables do not recurse or have a sense of order of evaluation, but top\nlevel variables can be used in job level variables. See the following\nexamples:\n\n\n```\n\n# You CANNOT do this (referencing a sibling variable in the same map)\n\nvariables:\n    PROD_STAGE_NAME: prod\n    PROD_URL: https://thisismywebsite.com/${PROD_STAGE_NAME}\n```\n\n\n```\n\n# You CAN do this (referencing a top-level variable from a job's variables\nmap)\n\nvariables:\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${PROD_STAGE_NAME}\n```\n\n\n```\n\n# But you CANNOT do something like this (nested variables)\n\nvariables:\n    CURRENT_STAGE: PROD\n    PROD_STAGE_NAME: prod\n\nmy_job:\n    variables:\n        STAGE_NAME: ${${CURRENT_STAGE}_STAGE_NAME}\n```\n\n\nThat last example gives us a ton of power. We’ll be sure to abuse that as we\ngo.\n\n\nAs mentioned before, jobs run a bash script in a context. So every job must\nhave a `script`. The last big thing that you need is “flow control”. By\ndefault, a job will run on every commit. Using the `only`, `except`, and\n`when` keys allows you to control how jobs are triggered. `only` and\n`except` accept the following options:\n\n\n* Branch names, e.g. `master` or `develop`\n\n* Tag names\n\n* JS style RegExp literals to evaluate against branch/tag names\n\n* These special keywords: `api`, `branches`, `external`, `tags`, `pushes`,\n`schedules`, `triggers`, and `web`\n\n* Using `branches` and `tags` with `only` cause a job to be run for every\nbranch or tag, respectively\n\n* Repo path filters to deal with repo forks\n\n\nOne more important fact: jobs that start with a period character are\ndisabled, e.g.: `.my_disabled_job`\n\n\nThat should be enough to get us started. You can find more [GitLab CI/CD\ndocumentation here](https://docs.gitlab.com/ee/ci/). 
The most useful bit is\nthe `.gitlab-ci.yml` reference found\n[here](https://docs.gitlab.com/ee/ci/yaml/).\n\n\nAs with any new tool, I got to read and re-read the documentation and make\nsome mistakes getting things right. By the time I was knee-deep in this, I\nrealized there was a need to prevent anyone from having to do this again,\nmyself included. The solution requires two things: a well-designed CI/CD\ntemplate and a way to get that template into all of your new repositories.\nLet’s tackle template design next.\n\n\n### Designing a template\n\n\nThis part is hard to talk about in a completely generic manner. Instead,\nlet’s walk through our use case. Looking at our projects past and present, I\ncould usually bet on these characteristics:\n\n\n* Deploys to AWS (we are an AWS consultancy after all…)\n\n* Uses Serverless framework with Node.js or Python\n\n* May deploy production to multiple regions\n\n* May deploy different stages to different accounts\n\n\nIn addition, I realized that I needed these other options:\n\n\n* May need to “disable” dev/staging from doing real work\n\n* May want one dev environment per branch\n\n\nFinally, we decided on the following deployment strategy:\n\n\n* Production deploys via tags on `master`\n\n* Staging deploys on commits/merges to `master`\n\n* Dev deploys should work for all other branches (we’re not going to\nimplement this one in this post)\n\n\nMy roots are as a software developer, so making things reusable is a core\nskill at this point. A good template is going to make it super easy for the\nintended cases and be fairly adaptable for other uses. Here is the goal:\n\n\n* One script per stage. That means only one test script, one build script,\nand one deploy script. Oh, and keep it DRY.\n\n* Jobs should be as similar as possible, and differences should be tweaked\nby top level variables.\n\n\nLet’s focus on that single script per stage. 
We’re not going to cover how to\nwrite the deployment script, but we’ll focus on the deploy stage. But let’s\nsay we start with a deployment job like this:\n\n\n```\n\ndeploy:production:\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n    only:\n        - tags\n```\n\n\n\nNow we could copy and tweak this for staging and dev, but that’s not what\nwe’re after. First, let’s break the script off to a reusable chunk and use\nit in our staging deploy:\n\n\n```\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME} ${REGION}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::1234567890:role/gitlab-ci-deployment\n        STAGE_NAME: prod\n        REGION: us-east-1\n        ACCOUNT: \"1234567890\"\n        PRODUCTION: \"true\"\n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: arn:aws:iam::0987654321:role/gitlab-ci-deployment\n        STAGE_NAME: staging\n        REGION: us-east-1\n        ACCOUNT: \"0987654321\"\n    only:\n        - master\n```\n\n\nUsing YAML anchors and references, we can inject the script into all of our\ndeployment jobs. Notice that the deployment script is disabled. This is\nbecause we don’t want it to run in parallel with all of our intended jobs.\nWe also added a `PRODUCTION` environment variable to just the production\ndeploy to allow our script to pick that up too. If your code knows about\nthis, you can use this to turn on/off production-only features. 
Now, we can\nmake this cleaner and easier for our developers by pulling all of the\n`variables` to a top-level variables map at the top of the file:\n\n\n```\n\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD_REGION: us-east-1\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\ndeploy:production:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${PROD_STAGE_NAME}\n        REGION: ${PROD_REGION}\n        ACCOUNT: ${PROD_ACCOUNT}\n        PRODUCTION: \"true\"        \n    only:\n        - tags\n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\n\n\nNow, that’s looking more reusable, and we have accomplished our second goal\nof making the jobs very similar and controlled by top-level variables. This\nmakes it easy for anyone who fits the template’s use case perfectly to reuse\nit. 
We could easily add the dev environment, but we’ll skip that in favor of\nillustrating multi-region production deploys:\n\n\n```\n\nvariables:\n    PROD_ACCOUNT: \"1234567890\"\n    PROD_STAGE_NAME: prod\n    PROD1_REGION: us-east-1\n    PROD2_REGION: us-west-2\n    STAGING_ACCOUNT: \"0987654321\"\n    STAGING_STAGE_NAME: staging\n    STAGING_REGION: us-east-1\n\n.deployment_script: &deployment_script\n    stage: deploy\n    script: |\n        # assume ${DEPLOYMENT_ROLE} in AWS\n        # install dependencies\n        # run serverless deployment with ${STAGE_NAME}, ${REGION}, and ${ACCOUNT}\n\n.production_variables\n    DEPLOYMENT_ROLE: \"arn:aws:iam::${PROD_ACCOUNT}:role/gitlab-ci-deployment\"\n    STAGE_NAME: ${PROD_STAGE_NAME}\n    ACCOUNT: ${PROD_ACCOUNT}\n    PRODUCTION: \"true\"    \n\ndeploy:production_1: &deploy_production\n    \u003C\u003C: *deployment_script\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD1_REGION}\n    only:\n        - tags\n\ndeploy:production_2:\n    \u003C\u003C: *deploy_production\n    variables:\n        \u003C\u003C: *production_variables\n        REGION: ${PROD2_REGION}        \n\ndeploy:staging:\n    \u003C\u003C: *deployment_script\n    variables:\n        DEPLOYMENT_ROLE: \"arn:aws:iam::${STAGING_ACCOUNT}:role/gitlab-ci-deployment\"\n        STAGE_NAME: ${STAGING_STAGE_NAME}\n        REGION: ${STAGING_REGION}\n        ACCOUNT: ${STAGING_ACCOUNT}\n    only:\n        - master\n```\n\n\nNotice that we have changed the job names to reflect having multiple\nregions. In addition, we are making use of YAML anchors and references to\ncopy the entire `deploy:production_1` job into `deploy:production_2` and\nthen we just override the `REGION` variable. This makes adding additional\nregions super easy.\n\n\nWhat’s more useful at this point is that, as long as you have made your\nscript flexible enough, you can now distribute this to your development team\nas a template. 
If their project fits the script and configuration perfectly,\nthey should just have to fill in the correct values for the top-level\nvariables and go. For those needing something different, they should\nhopefully be able to just tweak the script. Now, we just need to solve the\nproblem of making sure that they actually use the template…\n\n\n### Automatic CI/CD injection with GitLab and AWS Lambda\n\n\nI was inspired by GitHub’s option to select a .gitignore and license during\nthe repo creation process. What if we could have that for CI? Forking GitLab\nand figuring out how to hack this in did not sound like a quick or easy\nthing to do. However, after a little research, I found that we could use a\nsystem hook to trigger a Lambda that could inject the desired template via\nthe commit API. This part is not as interesting to read about, so we did one\nbetter: we have open sourced this tool so you can deploy it in your\nenvironment. Check out the repo\n[here](https://github.com/trek10inc/gitlab-boilerplate-injector). And if\nyou’re looking for someone to help you implement these and other awesome\nautomations and AWS solutions, we would love to talk to you. Feel free to\nreach out to us at info@trek10.com for more. Thanks for reading!\n\n\n## About the Guest Author\n\n\nRob has spent his career honing his interpersonal, technical, and problem\nsolving skills. He spent five years in customer service and management,\nfollowed by over five years in software development and consulting. He has\nexperience working and consulting for everything from startups to Fortune\n500 enterprises in a variety of industries including manufacturing,\nhealthcare, and finance. 
Rob has earned a MS in Applied Mathematics and\nComputer Science from Indiana University and a BS in Pharmaceutical Sciences\nfrom Purdue University.\n",[9,763],{"slug":4045,"featured":6,"template":700},"making-ci-easier-with-gitlab","content:en-us:blog:making-ci-easier-with-gitlab.yml","Making Ci Easier With Gitlab","en-us/blog/making-ci-easier-with-gitlab.yml","en-us/blog/making-ci-easier-with-gitlab",{"_path":4051,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4052,"content":4058,"config":4064,"_id":4066,"_type":14,"title":4067,"_source":16,"_file":4068,"_stem":4069,"_extension":19},"/en-us/blog/managing-gitlab-resources-with-pulumi",{"title":4053,"description":4054,"ogTitle":4053,"ogDescription":4054,"noIndex":6,"ogImage":4055,"ogUrl":4056,"ogSiteName":685,"ogType":686,"canonicalUrls":4056,"schema":4057},"Managing GitLab resources with Pulumi","Learn how Pulumi's infrastructure-as-code tool helps streamline the automation of GitLab CI/CD workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683430/Blog/Hero%20Images/AdobeStock_293854129__1_.jpg","https://about.gitlab.com/blog/managing-gitlab-resources-with-pulumi","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing GitLab resources with Pulumi\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Josh Kodroff, Pulumi\"}],\n        \"datePublished\": \"2024-01-10\",\n      }",{"title":4053,"description":4054,"authors":4059,"heroImage":4055,"date":4061,"body":4062,"category":741,"tags":4063},[4060],"Josh Kodroff, Pulumi","2024-01-10","In the ever-evolving landscape of DevOps, platform engineers are\nincreasingly seeking efficient and flexible tools to manage their GitLab\nresources, particularly for orchestrating continuous integration/continuous\ndelivery (CI/CD) pipelines.\n[Pulumi](https://pulumi.com?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\noffers a 
unique approach to infrastructure as code (IaC) by allowing\nengineers to use familiar programming languages such as TypeScript, Python,\nGo, and others. This approach streamlines the automation of GitLab CI/CD\nworkflows. Pulumi's declarative syntax, combined with its ability to treat\ninfrastructure as software, facilitates version control, collaboration, and\nreproducibility, aligning seamlessly with the GitLab philosophy.\n\n\nLet's explore the power of using Pulumi and GitLab.\n\n\n## What is Pulumi?\n\n\nPulumi is an IaC tool that allows you to manage resources in more than 150\nsupported cloud or SaaS products (including AWS and GitLab, which we will be\ndemonstrating in this post). You can express your infrastructure with Pulumi\nusing popular general-purpose programming languages like TypeScript, Python,\nand Go.\n\n\nPulumi is declarative (just like other popular IaC tools you may be familiar\nwith), which means that you only need to describe the desired end state of\nyour resources and Pulumi will figure out the order of create, read, update,\nand delete (CRUD) operations to get from your current state to your desired\nstate.\n\n\nIt might seem strange at first to use a general-purpose programming language\nto express your infrastructure's desired state if you're used to tools like\nCloudFormation or Terraform, but there are considerable advantages to\nPulumi's approach, including the following:\n\n- **Familiar tooling.** You don't need any special tooling to use Pulumi.\nCode completion will work as expected in your favorite editor or IDE without\nany additional plugins. 
You can share Pulumi code using familiar packaging\ntools like npm, PyPI, etc.\n\n- **Familiar syntax.** Unlike with DSL-based IaC tools, you don't need to\nlearn special ways of indexing an array element, or creating loops or\nconditionals - you can just use the normal syntax of a language you already\nknow.\n\n\nThe Pulumi product has an open source component, which includes the Pulumi\ncommand line and its ecosystem of providers, which provide the integration\nbetween Pulumi and the cloud and SaaS providers it supports. Pulumi also\noffers a free (for individual use) and paid (for teams and organizations)\nSaaS service called Pulumi Cloud, which provides state file and secrets\nmanagement, among many other useful features. It’s a widely-supported\nopen-source IaC tool.\n\n\n## Initializing the project\n\n\nTo complete this example you'll need:\n\n\n1. [A Pulumi Cloud\naccount](https://app.pulumi.com?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\nPulumi Cloud is free for individual use forever and we'll never ask for your\ncredit card. Pulumi Cloud will manage your Pulumi state file and handle any\nsecrets encryption/decryption. Because it's free for individual use (no\ncredit card required), we strongly recommend that you use Pulumi Cloud as\nyour backend when learning how to use Pulumi.\n\n2. A GitLab account, group, and a GitLab token set to the `GITLAB_TOKEN`\nenvironment variable.\n\n3. An AWS account and credentials with permissions to deploy identity and\naccess management (IAM) resources. 
For details on how to configure AWS\ncredentials on your system for use with Pulumi, see [AWS Classic:\nInstallation and\nConfiguration](https://www.pulumi.com/registry/packages/aws/installation-configuration/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\n\n\nThis example will use two providers from the [Pulumi\nRegistry](https://www.pulumi.com/registry/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources):\n\n\n1. The [GitLab\nProvider](https://www.pulumi.com/registry/packages/gitlab/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nwill be used to manage resources like Projects, ProjectFiles (to initialize\nour project repository), ProjectHooks (for the integration with Pulumi\nCloud), and ProjectVariables (to hold configuration for our CI/CD\npipelines).\n\n2. The [AWS Classic\nProvider](https://www.pulumi.com/registry/packages/aws/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nwill be used to manage AWS resources to create OpenID Connect (OIDC)\nconnectivity between AWS and GitLab.\n\n\nYou can initialize your Pulumi project by changing into a new, empty\ndirectory, running the following command, and accepting all the default\nvalues for any subsequent prompts:\n\n\n```bash\n\npulumi new typescript\n\n```\n\n\nThis will bootstrap an empty Pulumi program. Now you can import the provider\nSDKs for the providers you'll need:\n\n\n```bash\n\nnpm i @pulumi/aws @pulumi/gitlab\n\n```\n\n\nYour `index.ts` file is the entry point into your Pulumi program (just as\nyou would expect in any other Node.js program) and will be the file to which\nyou will add your resources. 
The above code assumes you're using the GitLab SaaS (\u003Chttps://gitlab.com>). If\nyou are using a private GitLab install, your value should be the domain of\nyour GitLab install, e.g. `gitlab.example.com`.\n\n\nThen, you'll use a [Pulumi\nfunction](https://www.pulumi.com/docs/concepts/resources/functions/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nto grab an existing GitLab group by name and create a new public GitLab\nproject in your GitLab group:\n\n\n```typescript\n\nconst group = gitlab.getGroup({\n  fullPath: \"my-gitlab-group\", // Replace the value with the name of your GL group\n});\n\n\nconst project = new gitlab.Project(\"pulumi-gitlab-demo\", {\n  visibilityLevel: \"public\",\n  defaultBranch: \"main\",\n  namespaceId: group.then(g => parseInt(g.id)),\n  archiveOnDestroy: false // Be sure to set this to `true` for any non-demo repos you manage with Pulumi!\n});\n\n```\n\n\n## Creating OIDC resources\n\n\nTo allow GitLab CI/CD to request and be granted temporary AWS credentials,\nyou'll need to create an OIDC provider in AWS that contains the thumbprint\nof GitLab's certificate, and then create an AWS role that GitLab is allowed\nto assume.\n\n\nYou'll scope the assume role policy so that the role can only be assumed\nby the GitLab project you declared earlier. The role that GitLab CI/CD\nassumes will have full administrator access so that Pulumi can create and\nmanage any resource within AWS.
(Note that it is possible to grant less than\n`FullAdministrator` access to Pulumi, but `FullAdministrator` is often\npractically required, e.g. where IAM resources, like roles, need to be\ncreated. Role creation requires `FullAdministrator`. This consideration also\napplies to IaC tools like Terraform.)\n\n\nAdd the following code to `index.ts`:\n\n\n```typescript\n\nconst GITLAB_OIDC_PROVIDER_THUMBPRINT =\n\"b3dd7606d2b5a8b4a13771dbecc9ee1cecafa38a\";\n\n\nconst gitlabOidcProvider = new\naws.iam.OpenIdConnectProvider(\"gitlab-oidc-provider\", {\n  clientIdLists: [`https://${audience}`],\n  url: `https://${audience}`,\n  thumbprintLists: [GITLAB_OIDC_PROVIDER_THUMBPRINT],\n}, {\n  deleteBeforeReplace: true, // URLs are unique identifiers and cannot be auto-named, so we have to delete before replace.\n});\n\n\nconst gitlabAdminRole = new aws.iam.Role(\"gitlabAdminRole\", {\n  assumeRolePolicy: {\n    Version: \"2012-10-17\",\n    Statement: [\n      {\n        Effect: \"Allow\",\n        Principal: {\n          Federated: gitlabOidcProvider.arn,\n        },\n        Action: \"sts:AssumeRoleWithWebIdentity\",\n        Condition: {\n          StringLike: {\n            // Note: Square brackets around the key are what allow us to use a\n            // templated string. See:\n            // https://stackoverflow.com/questions/59791960/how-to-use-template-literal-as-key-inside-object-literal\n            [`${audience}:sub`]: pulumi.interpolate`project_path:${project.pathWithNamespace}:ref_type:branch:ref:*`\n          },\n        },\n      },\n    ],\n  },\n});\n\n\nnew aws.iam.RolePolicyAttachment(\"gitlabAdminRolePolicy\", {\n  policyArn: \"arn:aws:iam::aws:policy/AdministratorAccess\",\n  role: gitlabAdminRole.name,\n});\n\n```\n\n\nA few things to be aware of regarding the thumbprint:\n\n\n1. If you are self-hosting GitLab, you'll need to obtain the thumbprint from\nyour private GitLab installation.\n\n2. 
If you're using GitLab SaaS, it's possible GitLab's OIDC certificate may\nhave been rotated by the time you are reading this.\n\n\nIn either case, you can obtain the correct/latest thumbprint value by\nfollowing AWS' instructions contained in [Obtaining the thumbprint for an\nOpenID Connect Identity\nProvider](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html)\nin the AWS docs.\n\n\nYou'll also need to add the role's ARN as a project variable so that the\nCI/CD process can make a request to assume the role:\n\n\n```typescript\n\nnew gitlab.ProjectVariable(\"role-arn\", {\n  project: project.id,\n  key: \"ROLE_ARN\",\n  value: gitlabAdminRole.arn,\n});\n\n```\n\n\n## Project hook (optional)\n\n\nPulumi features an integration with GitLab via a webhook that will post the\noutput of the `pulumi preview` directly to a merge request as a comment. For\nthe webhook to work, you must have a Pulumi organization set up with GitLab\nas its SSO source. If you don't have a Pulumi organization and would like to\ntry the integration, you can [sign up for a free\ntrial](https://app.pulumi.com/signup?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\norganization. The trial lasts 14 days, will give you access to all of\nPulumi's paid features, and does not require a credit card. 
For full details\non the integration, see [Pulumi CI/CD & GitLab\nintegration](https://www.pulumi.com/docs/using-pulumi/continuous-delivery/gitlab-app/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\n\n\nTo set up the webhook, add the following to your `index.ts` file:\n\n\n```typescript\n\nnew gitlab.ProjectHook(\"project-hook\", {\n  project: project.id,\n  url: \"https://api.pulumi.com/workflow/gitlab\",\n  mergeRequestsEvents: true,\n  enableSslVerification: true,\n  token: process.env[\"PULUMI_ACCESS_TOKEN\"]!,\n  pushEvents: false,\n});\n\n```\n\n\nNote that the above resource assumes that your Pulumi access token is stored\nas an environment variable. You may want to instead store the token in your\nstack configuration file. To do this, run the following command:\n\n\n```bash\n\npulumi config set --secret pulumiAccessToken ${PULUMI_ACCESS_TOKEN}\n\n```\n\n\nThis will store the encrypted value in your Pulumi stack configuration file\n(`Pulumi.dev.yaml`). Because the value is encrypted, you can safely commit\nyour stack configuration file to git. You can access its value in your\nPulumi program like this:\n\n\n```typescript\n\nconst config = new pulumi.Config();\n\nconst pulumiAccessToken = config.requireSecret(\"pulumiAccessToken\");\n\n```\n\n\nFor more details on secrets handling in Pulumi, see\n[Secrets](https://www.pulumi.com/docs/concepts/secrets/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nin the Pulumi docs.\n\n\n## Creating a repository and adding repository files\n\n\nYou'll need to create a git repository (a GitLab project) and add some files\nto it that will control the CI/CD process. 
```bash\n\nmkdir -p repository-files/scripts\n\ntouch repository-files/.gitlab-ci.yml repository-files/scripts/{aws-auth.sh,pulumi-preview.sh,pulumi-up.sh}\n\nchmod +x repository-files/scripts/{aws-auth.sh,pulumi-preview.sh,pulumi-up.sh}\n\n```
echo -e \"[profile oidc]\\nrole_arn=${ROLE_ARN}\\nweb_identity_token_file=/tmp/web_identity_token\" > ~/.aws/config
    commitMessage: `Add ${file}`,
You will also need to supply the name of your Pulumi\norganization.
Now you are ready to create some sample infrastructure in your repository.\nYou can use the `aws-typescript` template to quickly generate a simple Pulumi program\nwith AWS resources:
This is normal and is caused by the initial\nupload of `.gitlab-ci.yml` to the main branch without a Pulumi program being\npresent.)\n\n\n![Screenshot of the GitLab pipelines screen showing a running pipeline along\nwith a passed\npipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683438/Blog/Content%20Images/piplines.jpg)
Because Pulumi is\ndeclarative, removing the bucket from your program will delete it from AWS.\n\n\nFinally, run the `pulumi destroy` command again in the Pulumi program with\nyour OIDC and GitLab resources to finish cleaning up.\n\n\n## Next steps\n\n\nUsing IaC to define pipelines and other GitLab resources can greatly improve\nyour platform team's ability to reliably and quickly manage the resources to\nkeep application teams delivering. With Pulumi, you also get the power and\nexpressiveness of using popular programming languages to express those\nresources!\n\n\nIf you liked what you read here, here are some ways you can enhance your\nCI/CD pipelines:\n\n\n- Add [Pulumi Policy\nPacks](https://www.pulumi.com/docs/using-pulumi/crossguard/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nto your pipeline: Pulumi policy packs allow you to validate that your\nresources are in compliance with your organization's security and compliance\npolicies. Pulumi's open source [Compliance Ready\nPolicies](https://www.pulumi.com/docs/using-pulumi/crossguard/compliance-ready-policies/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\nare a great place to start on your journey. 
Compliance Ready Policies\ncontain policy rules for the major cloud providers for popular compliance\nframeworks like PCI-DSS and ISO27001, and policy packs are easy to integrate\ninto your pipelines.\n\n- Check out [Pulumi ESC (Environments, Secrets, and\nConfiguration)](https://www.pulumi.com/product/esc/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources):\nPulumi ESC makes it easy to share static secrets like GitLab tokens and can\neven [generate dynamic secrets like AWS OIDC\ncredentials](https://www.pulumi.com/blog/esc-env-run-aws/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources).\nESC becomes especially useful when using Pulumi at scale because it reduces\nthe duplication of configuration and secrets that are used by multiple\nPulumi programs. You don't even have to use Pulumi IaC to benefit from\nPulumi ESC - [Pulumi ESC's command\nline](https://www.pulumi.com/docs/esc-cli/commands/?utm_source=GitLab&utm_medium=Referral&utm_campaign=Managing-GitLab-Resources)\ncan be used with any CLI tool like the AWS CLI.\n",[9,696,283,232],{"slug":4065,"featured":6,"template":700},"managing-gitlab-resources-with-pulumi","content:en-us:blog:managing-gitlab-resources-with-pulumi.yml","Managing Gitlab Resources With Pulumi","en-us/blog/managing-gitlab-resources-with-pulumi.yml","en-us/blog/managing-gitlab-resources-with-pulumi",{"_path":4071,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4072,"content":4078,"config":4083,"_id":4085,"_type":14,"title":4086,"_source":16,"_file":4087,"_stem":4088,"_extension":19},"/en-us/blog/medium-gitlab-saas-runners-on-linux-now-available-to-all-tiers",{"title":4073,"description":4074,"ogTitle":4073,"ogDescription":4074,"noIndex":6,"ogImage":4075,"ogUrl":4076,"ogSiteName":685,"ogType":686,"canonicalUrls":4076,"schema":4077},"Medium GitLab SaaS runners on Linux now available to all tiers","Free tier users can follow a few instructions to use medium SaaS runners on Linux to increase 
CI/CD pipeline speeds.
As of now, though, our medium SaaS runners on Linux are available to all tiers.\n\nTo use the medium SaaS runners on Linux, simply add the `saas-linux-medium-amd64` tag in your project's `gitlab-ci.yml` file.\nUnder the hood, we spin up a fresh [GCP `n2d-standard-4`](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machine_types) VM for one-time use with 4 vCPUs, 16GB RAM, and 50GB storage attached.\n\nWe look forward to seeing our Free tier users increase their pipeline speeds.\n\n## References\n- [What are SaaS runners?](https://docs.gitlab.com/ee/ci/runners/)\n- [SaaS runners on Linux documentation](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\n- [Cost factor for SaaS runners](https://docs.gitlab.com/ee/ci/pipelines/cicd_minutes.html#additional-costs-on-gitlab-saas)\n",[9,696,1062],{"slug":4084,"featured":6,"template":700},"medium-gitlab-saas-runners-on-linux-now-available-to-all-tiers","content:en-us:blog:medium-gitlab-saas-runners-on-linux-now-available-to-all-tiers.yml","Medium Gitlab Saas Runners On Linux Now Available To All Tiers","en-us/blog/medium-gitlab-saas-runners-on-linux-now-available-to-all-tiers.yml","en-us/blog/medium-gitlab-saas-runners-on-linux-now-available-to-all-tiers",{"_path":4090,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4091,"content":4097,"config":4102,"_id":4104,"_type":14,"title":4105,"_source":16,"_file":4106,"_stem":4107,"_extension":19},"/en-us/blog/microcks-and-gitlab-part-one",{"title":4092,"description":4093,"ogTitle":4092,"ogDescription":4093,"noIndex":6,"ogImage":4094,"ogUrl":4095,"ogSiteName":685,"ogType":686,"canonicalUrls":4095,"schema":4096},"Speed up API and microservices delivery with Microcks and GitLab - Part 1","Learn how to configure Microcks for GitLab and what the use cases are for this open source Kubernetes-native 
tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683021/Blog/Hero%20Images/lightsticks.png","https://about.gitlab.com/blog/microcks-and-gitlab-part-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up API and microservices delivery with Microcks and GitLab - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-09-27\",\n      }",{"title":4092,"description":4093,"authors":4098,"heroImage":4094,"date":4099,"body":4100,"category":718,"tags":4101},[1485],"2023-09-27","\n\nAPI development is all the rage these days for customer and partner integration, frontend-to-backend communication, microservices orchestration, and more. Yet APIs have their challenges, including how to create a fast feedback loop on design, how different teams can work with autonomy without having to wait for each other's API implementation, and how to cope with backward compatibility tests when shipping newer versions of the API. \n\n[Microcks](https://microcks.io), an open source, Kubernetes-native tool for API mocking and testing, addresses these challenges. With Microcks, which is accepted as a Sandbox project in the [Cloud Native Computing Foundation](https://cncf.io), developers can leverage their [OpenAPI](https://www.openapis.org/), [GraphQL](https://graphql.org/), [gRPC](https://grpc.io/), [AsyncAPI](https://www.asyncapi.com/), and [Postman Collection](https://www.postman.com/collection/) assets to quickly mock and simulate APIs before writing them. Couple Microcks with GitLab and you have a powerful combination to foster collaboration, encourage rapid changes, and provide a robust delivery platform for API-based applications.\n\nIn this ongoing blog series, we will introduce you to Microcks use cases and how they fit with the GitLab platform. 
by running contract tests. This validation can be integrated into your CI/CD pipeline so that conformance can be checked on each and every iteration. This is of great help to enforce backward compatibility of your API or microservices interfaces.\n\nMicrocks offers a uniform and consistent approach for the various kinds of request/response APIs (REST, GraphQL, gRPC, SOAP)
When it comes to team collaboration, Microcks can be deployed as a centralized instance that connects to the Git repositories of the organization, discovers the API artifacts, and then provides shared up-to-date API simulations.\n\n![diagram of how Microcks fits into development lifecycle](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks.png){: .shadow.small.center}\n\nTo ease the burden on developers (and administrators), Microcks can be configured to use your GitLab platform as an identity provider. With that configuration, integrating Microcks is seamless, and API simulations are automatically shared among development teams. Microcks fosters collaboration by providing everyone with the same “source of truth” and avoiding drift risks. The tool can also be used to lower the pain and the cost of deploying and maintaining complex QA environments because simulations are inexpensive to deploy or redeploy on-demand. Microcks deployment follows a GitOps approach.\n\nBeyond this sharing of simulations, Microcks also integrates well with CI/CD pipelines. As you release API-based applications, there is always concern about conformance of the contractualized expectations you defined using specifications like OpenAPI, GraphQL, and the like. Usually, the hardest part isn't delivering the `1.0` of this API; problems come later when you're trying to deliver the `1.3`. This latest version must still be backward compatible with the 1.0 contract if you don't want to make your consumers angry and frustrated.\n\nThis conformance validation is very well assured by Microcks using contract-testing principles. 
So we encourage you to plug Microcks into some `test` related jobs in your GitLab pipeline and delegate this conformance validation to your Microcks instance.\n\n![microcks-in-gitlab-workflow](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-in-gitlab-workflow.png){: .shadow.medium.center}\n\n\nEmbedding Microcks conformance testing in your pipeline is actually easy thanks to our lightweight CLI that you'll integrate in pipeline jobs. You can choose to reuse an existing Microcks instance to record results and keep history of your success or pop up a new ephemeral instance as it's lightweight and fast to bootstrap.\n\n## How to set up GitLab as an identity provider in Microcks\n\nTo start off this series, we will detail how to configure Microcks to use your GitLab platform as an identity provider. This is in fact very easy as authentication in Microcks is based on [Keycloak](https://keycloak.org) (another CNCF project) and GitLab can be set as an identity provider in Keycloak (see [official documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#gitlab)).\n\n**Note:** This configuration is optional as Microcks can use any other identity provider Keycloak integrates with.\n\nKeycloak is a very common solution that may be deployed already at your organization. If not, Microcks comes with a Keycloak distribution that is pre-configured for its usage with a realm called `microcks`. 
We have used this realm to validate this configuration.\n\n### Create a GitLab Group Application\nThe first thing is to create a new [Group Application](https://docs.gitlab.com/ee/integration/oauth_provider.html#create-a-group-owned-application) on your GitLab instance as follows:\n- `Name`: `microcks-via-keycloak`\n- `Redirect URI`: `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint`\n- `Scopes`: `read_user`, `openid`, `profile` and `email`\n\n![gitlab-application-form](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application-form.png){: .shadow.medium.center}\n\n\nThis application uses your Keycloak instance with `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint` as the redirect URI. As a result, we obtain an `Application ID` and an associated `Secret` we have to keep aside for the next step.\n\n![gitlab-application](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application.jpeg){: .shadow.medium.center}\n\n\n### Add GitLab as identity provider in Keycloak\nThe next step takes place in the Keycloak admin console. Once the correct `microcks` realm is selected, you'll just have to go to the **Identity providers** section and add a GitLab provider. Simply paste here the `Application ID` you got earlier as `Client ID` and the `Secret` as `Client Secret`. 
You can also choose a `Display order` if you plan to have multiple identity providers.\n\n![keycloak-identity-provider](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-identity-provider.jpg){: .shadow.medium.center}\n\n\nThen, from the **Authentication** section in the admin console, choose the browser flow and configure the `Identity Provider Redirector` as follows:\n\n- `Alias`: `GitLab`\n- `Default Identify Provider`: `gitlab`\n\n![keycloak-redirector](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-redirector.jpg){: .shadow.medium.center}\n\n### Test your Microcks configuration\nNow open the Microcks URL into your browser and you'll be directly redirected to the GitLab login page. Enter your GitLab credentials and you will be authenticated and redirected to Microcks. \n\n![microcks-homepage](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-homepage.jpeg){: .shadow.medium.center}\n\n## What's next?\nIn upcoming blogs, we'll detail how GitLab can be used in the two major use cases for Microcks. We'll see how Microcks integrates with GitLab Git repositories to discover API specifications and produce simulations, and how to integrate Microcks conformance tests into your GitLab CI/CD pipelines.\n\n_[Laurent Broudoux](https://www.linkedin.com/in/laurentbroudoux/) is a cloud-native architecture expert and enterprise integration problem lover. He has helped organizations in adopting distributed and cloud paradigms while capitalizing on their critical existing assets. He is the founder and lead developer of the [Microcks.io](https://microcks.io/) open-source project: a Kubernetes-native tool for API mocking and testing. 
For this, he is using his 10+ years experience as an architect in financial services where he defined API transformation strategies, including governance and delivery process._\n\n_[Madou Coulibaly](https://gitlab.com/madou) is a senior solutions architect at GitLab._\n",[1126,874,9,830,232],{"slug":4103,"featured":6,"template":700},"microcks-and-gitlab-part-one","content:en-us:blog:microcks-and-gitlab-part-one.yml","Microcks And Gitlab Part One","en-us/blog/microcks-and-gitlab-part-one.yml","en-us/blog/microcks-and-gitlab-part-one",{"_path":4109,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4110,"content":4115,"config":4120,"_id":4122,"_type":14,"title":4123,"_source":16,"_file":4124,"_stem":4125,"_extension":19},"/en-us/blog/migrating-from-jenkins",{"title":4111,"description":4112,"ogTitle":4111,"ogDescription":4112,"noIndex":6,"ogImage":3396,"ogUrl":4113,"ogSiteName":685,"ogType":686,"canonicalUrls":4113,"schema":4114},"Migrating from Jenkins","Best practices for making the switch to GitLab CI/CD.","https://about.gitlab.com/blog/migrating-from-jenkins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-26\",\n      }",{"title":4111,"description":4112,"authors":4116,"heroImage":3396,"date":4117,"body":4118,"category":1040,"tags":4119},[715],"2019-11-26","\nMigrations feel daunting, which is one of the reasons teams put them off as long as possible. Even when tools are brittle or not working as they should, it’s the fear of the unknown that keeps us from making the plunge. Teams might have found workarounds to solve common problems but those only work... until they don’t work. If you know that you need to make a tool change or migration, it’s much better to do it early rather than during a crisis.\n\nMigrations don’t have to be scary. 
If you’re tired of brittle builds and endless plugin maintenance, migrating your CI/CD doesn’t have to be a headache. Several teams have [made the switch from Jenkins CI to GitLab CI/CD](/blog/5-teams-that-made-the-switch-to-gitlab-ci-cd/), and there are resources available to ease the transition.\n\n## From Jenkins to GitLab using Docker\n\nThe team at [Linagora](/blog/docker-my-precious/) loved that GitLab includes Git repository management, issue tracking, [code review](/stages-devops-lifecycle/create/), an IDE, activity streams, wikis, and built-in CI/CD to test, build, and deploy code. In order to take advantage of these all-in-one features, they needed to find a way to switch over from Jenkins CI. Luckily, GitLab’s Docker support and [documentation](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html) allowed them to utilize custom Docker images, spin up services as part of testing, build new Docker images, and run on Kubernetes.\n\n### Running Jenkinsfiles in GitLab CI/CD\n\nOne short-term solution teams can use when migrating from Jenkins to GitLab CI/CD is [using Docker to run a Jenkinsfile in GitLab CI/CD](https://lackastack.gitlab.io/website/posts/gitlabci-jenkinsfile/) while the syntax is being updated. While this doesn’t address the endless [plugin dependencies](/blog/plugin-instability/), it’s a stop-gap measure that can get your team working in GitLab until the migration is complete.\n\n## Using Auto DevOps\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/index.html) can potentially be used to build, test, and deploy your applications with little to no configuration needed at all. One of the more time-consuming tasks during a Jenkins migration can be converting the pipelines from Groovy to YAML, but Auto DevOps provides predefined CI/CD configurations – just push your code and Auto DevOps can build a default pipeline. Auto DevOps offers more features including security testing, performance testing, and code quality testing. 
If you need [advanced customizations](https://docs.gitlab.com/ee/topics/autodevops/index.html#customizing), you can modify the templates without having to start over on a completely different platform.\n\nGitLab senior solutions manager [Brendan O’Leary](/company/team/#brendan) provided a brief overview of how to convert a Jenkins pipeline built with Maven into a GitLab CI/CD pipeline using Auto DevOps.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/RlEVGOpYF5Y\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Advice from teams that made the switch\n\nAt our [GitLab Commit](/events/commit/) event in London, the team at adSoul, a Germany-based marketing automation company, discussed [their own transition from Jenkins to GitLab](/blog/adsoul-devops-transition-to-gitlab-ci/). They offered insight into their migration process, but for others considering GitLab CI/CD, here are some best practices:\n\n### Start small\n\nIn the spirit of iteration, it’s better to make incremental changes than try to tackle everything all at once. Even if it’s just small projects, or just running a Jenkinsfile in the meantime, be patient and aim for steady progress\n\n### Utilize tools effectively\n\nWith Docker and Auto DevOps, you have the tools available to ease the transition so you’re not reinventing the wheel.\n\n### Communicate clearly\n\nKeep teams informed of the process and communicate any changes. This can also apply to the naming of your new pipelines. 
Aim for clear job names, style your config for a better overview, and write comments for variables and hard-to-understand code.\n\nFor more information, check out our [migrating from Jenkins documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nCover image by [Aryan Singh](https://unsplash.com/@wuzclicks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@wuzclicks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,721],{"slug":4121,"featured":6,"template":700},"migrating-from-jenkins","content:en-us:blog:migrating-from-jenkins.yml","Migrating From Jenkins","en-us/blog/migrating-from-jenkins.yml","en-us/blog/migrating-from-jenkins",{"_path":4127,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4128,"content":4133,"config":4139,"_id":4141,"_type":14,"title":4142,"_source":16,"_file":4143,"_stem":4144,"_extension":19},"/en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"title":4129,"description":4130,"ogTitle":4129,"ogDescription":4130,"noIndex":6,"ogImage":2088,"ogUrl":4131,"ogSiteName":685,"ogType":686,"canonicalUrls":4131,"schema":4132},"Migrating from Bamboo Server to GitLab CI: Getting started","Theoretical reasoning and practical proposal on migrating an existing CI/CD infrastructure of some multi-component application from Bamboo Server to GitLab CI","https://about.gitlab.com/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":4134,"description":4130,"authors":4135,"heroImage":2088,"date":4136,"body":4137,"category":718,"tags":4138},"How to migrate Atlassian Bamboo Server's CI/CD 
infrastructure to GitLab CI, part one",[3402],"2022-07-06","\n\nWhen I faced a task of migrating from `Atlassian Bamboo Server` to `GitLab CI/CD`, I was not able to find any comprehensive information regarding something similar. So I designed a process on my own. This demo shows how to migrate a CI/CD structure for an existing multi-component application from a discontinued [Atlassian Bamboo Server](https://www.atlassian.com/migration/assess/journey-to-cloud) to [GitLab CI/CD](https://docs.gitlab.com/ee/index.html) (Community Edition).\n\nThe accompanying repository is https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app.\n\nIn this first part of a two-part series, you will find a description of the current state of affairs - i.e., how the CI/CD has been organized within Bamboo Server, how the Bamboo Build and Deploy plans are designed for bootstrapping infrastructure and deploying the components of the application, and the architecture of the application itself.\n\nAnd in part two, we'll take a deeper look at the virtues of `GitLab CI/CD`.\n\n## Initial state\n\n(Note: This is not a description of some particular project but more a kind of compilation of several projects I worked on.)\n\nThe application solution allows the client to fulfill a particular business purpose (the nature of which is not relevant here and thus not specified) and consists of more than 50 discrete components (further referred to as `applications` or just `apps` or `components`). I refrain from calling them microservices as each of them looks more like a full-fledged application communicating with other siblings using REST API and messages in Kafka topics. Some of them expose a web UI to external or internal users and some are just utility parts serving the needs of other components or performing internal operations, etc.\n\nCode for each app is stored in its own Git repository (further just `repo`). So, a `multi-repo` approach is used for them. 
Each app may be written in different languages and packaged as one or several OCI-images for deployment.\n\nEach app repo looks like:\n```\n📦 \u003Csome-app-git-repo>\n ┣ 📂src \u003C-- application source code\n ┣ 📂docker-compose\n ┃ ┗ 📜docker-compose.yml \u003C-- analogue of K8s manifests\n ┗ 📜Dockerfile \u003C-- conventionally, \"Dockerfile\" name is used for OCI image specification file\n```\n\nFor running the applications, the client uses an outdated orchestration system (one from pre-Kubernetes epoch). So each app repo contains a Docker-compose compatible file describing deployment directives for that outdated orchestration system (in essence, similar to Kubernetes Deployment manifests). \n\nFor all of the build and deploy activities Atlassian Bamboo Server is used. \n\nSome details for those not familiar with the Bamboo Server - in an opinionated manner it explicitly separates so-called `build` pipelines and `deployment` pipelines. The former are supposed to build application code and produce some artifacts for further deployment (in our case those artifacts are OCI images uploaded to OCI registry and docker-compose.yml files referring to those images). The latter ones are supposed to take some particular set of artifacts and apply them to some particular `environment`. An `environment` (referred to `env` in the future for brevity) here is just an abstract deployment target characterized by a set of environment variables attached to it and exposed to the apps deployed into it. In reality, an `env` is implemented as a set of resources (virtual machines, databases, object storage locations, etc.) required by the applications.\n\nIn Bamboo, one `build` pipeline usually corresponds to one `deployment` pipeline so when the latter is started it just takes the artifacts from the attached `build` pipeline as input. 
\n\nThe client uses a `production` env, `preproduction` env, and numerous (up to several hundreds) so-called `staging` (short-lived) envs where different development teams and software engineers can test various combinations of the apps (here we assume that they have ~80-100 distinguish components of the application solution and several hundreds of software developers which gives a lot of possible combinations and requires so many `staging` envs).\n\nRoughly, a configuration of a `deploy` pipeline consists of a specification of the source artifacts (which are provided by the attached `build` pipeline as described earlier) and a specification of the set of envs where those artifacts (effectively, an application) can be deployed to.\n\nCurrent installation uses sophisticated dynamic generation of envs set for each app deployment pipeline. Roughly speaking, they have a central configuration file with the list of all existing envs where for each env a list of apps allowed to be deployed to it is denoted. Each time the file is modified (i.e., an env is created or deleted), the deployment pipelines are automatically being updated so as in the result each of them contains a list of envs corresponding for each app. You will have more idea about this aspect when you have looked at the implementation section later.\n\nIn the Bamboo UI this looks like:\n\n![envs_list_on_build_result_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/envs_list_on_build_result_page.png)\n\nHere you can see an application build result page where on the right-hand side under the `Included in deployment project` title you can see a list of envs into which you can deploy the application. (Keep in mind that besides `build` and `deployment` pipelines, the Bamboo also uses a notion of `releases` - this is just some kind of an intermediate entity that should be created out of a build result to make it possible to deploy that build into some env). 
The `cloud-with-upwards-arrow` button in the `Actions` column starts a corresponding `deploy` pipeline with automatically passing the link to a build result (in a form of a `release` entity in Bamboo terminology) and the name of the env next to which the button has been clicked (the procedure of how a list of envs is created for a `deploy` pipe is described above).\n\nA concept of a `release` is specific to Bamboo Server, though it provides some amenities. For example, on the Release details page you can see a list of envs where a release has been deployed to. On the `Commits` tab you can backtrack a release to the application code in a SVC. And the `Issues` tab shows attached Jira tickets.\n\n![bamboo_release_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_release_details.png)\nRelease details page\n{: .note.text-center}\n\nAn env details page also enumerates releases history for this env (in scope of one particular application though as an env is specified for each deployment pipeline individually):\n\n![bamboo_env_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_env_details.png)\nEnv details page \n{: .note.text-center}\n\nAnd upon clicking the `cloud-with-upwards-arrow` button the Bamboo shows diff of Jira tickets and commits in respect to the previous `release` (only if both releases are made from artifacts from the same Git branch):\n\n![deploy_launch_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/deploy_launch_page.png)\nDeploy launch page\n{: .note.text-center}\n\nSo, in general, the current path from source control to an env for each app looks like:\n\n![svc_to_env_path](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/svc_to_env_path.png)\n\nThe Build plans are triggered automatically upon Git commits or Git tags. 
Most of the Deployment plans are started by the project members manually when needed. Each Deploy plan contains a step that checks if a user who started the plan has permissions to deploy into an env (for example, only members of the team which owns an env are allowed to deploy to that env and the deployment to the production env is allowed only for a set of eligible project members).\n\n## The task\n\nThe task is to migrate the aforementioned design from Bamboo Server to `GitLab` while keeping a similar deployment scheme (leveraging GitLab's `Environments` feature).\n\nAlso the following should be considered:\n\n - team members (software engineers, quality assurance specialists) are supposed to be able to manage environments on their own in a user-friendly self-service manner.\n - there should not be any discrepancy in IaC for different environments (per `12-factor apps` best practices), i.e. for any kind of an environment, be it a development or production one, the same set of IaC (here - Terraform files) should be used.\n  - the core ideas and workflows established in the previous situation (implemented with Atlassian Bamboo) should be kept to make the migration smoother for the members of the projects (also sometimes referred to as just users). \n\n## Implementation\n\n### Implementation's GitLab groups\\projects structure\n\n```\n📦 \u003CGitLab root group>\n ┣ 📂 apps GitLab group\n ┃ ┣ 📃 app1 GitLab project\n ┃ ┣  ...\n ┃ ┗ 📃 appN GitLab project\n ┣ 📂 ci GitLab group\n ┃ ┣ 📃 library GitLab project\n ┃ ┗ 📃 oci-registry GitLab project\n ┗ 📂 infra GitLab group\n  ┣ 📃 environment-blueprints GitLab project\n  ┣ 📃 environment-set GitLab project\n  ┗ 📃 k8s-gitops GitLab project\n```\n\n*Description*:\n\nThe most important content is in the `ci/library` repo (the shared ci configs) and `environment-set` repo. 
The other repos don't require much attention: The `k8s-gitops` purpose is not implemented and the repo is empty, the `apps` group just imitates source code for some apps, and the `ci/oci-registry` serves a role of an OCI registry for the solution.\n\nThe `apps` GitLab group merely contains the apps source code per se. Each GitLab project in this group corresponds to one app. Each app repo is expected to contain the source code itself (in the `src` directory for example), a `k8s` directory with k8s manifests, and an OCI image specification file (traditionally often called `Dockerfile`). \n\nThe `ci` GitLab group contains the `ci/library` project that holds shared `.gitlab-ci.yaml` files used by other projects (in a manner similar to Jenkins' shared libraries) and the `ci/oci-registry` serves as an OCI-image registry for various images used by the demo project (it also contains a Git repository with gitlab-ci files to build some utility images with tools used in various pipelines). For simplicity, the latter stores all the images throughout all the projects of the demo, though it's clearly not the best choice for a real-life situation when different sets of images of a set of separate projects/registries should be created.\n\nThe `infra` group holds applications infrastructure creation related Git repositories:\n\nThe `infra/k8s-gitops` is mostly irrelevant to the topic of this demo. In this demo it's presumed that Kubernetes is used as a computation workload platform and when a k8s cluster is created for an environment all the k8s manifests are supposed to be put into this repo (where each branch corresponds to a single environment) to be consumed by a GitOps tool installed into the cluster.\n\nThe `infra/environment-blueprints` holds parametrized IaC templates describing all the resources required for a full-fleged environment. In this example, the Terraform is used as an IaC tool though the principles are similar for its analogs (CloudFormation, for instance). 
The blueprints are parametrized in such manner that in the defaults values they hold some sensible values (most likely set to different values depending on the kind of a environment they were used to bootstrap - for example, a production env and everything else). It's implied that there might coexist several versions of the blueprints (implemented by using Git branches or Git tags) so each environment (see the next paragraph about `infra/environment-set`) can explicitly specify which version it wants to use (in case of using Terraform by specifying Git reference in the module's `source` field).\n\nHere I would like once again to highlight a digression from the best practices. For simplicity in the `infra/environment-blueprints` repo all the parts of an environment are combined into one single Terraform module (or a workspace, or a Stack in CloudFormation's terminology). In that way all the resources are always updated or changed within a single `terraform apply` command, which is cumbersome for large infrastructures containing a lot of resources. For larger infrastructures it would be more manageable to split into disparate Terraform modules (or CloudFormation Stacks, or Azure ARM Resource Groups) and thus make it possible for the infrastructure to be changed/updated in parts according to which exact components of it have changed. This might raise another question - how to manage dependencies in between such parts if they are present? For that, we would use some kind of an external (in respect to the IaC tool itself) orchestration tool like AWS Step Functions... or even GitLab's DAG feature!\n\nFinally, the `infra/environment-set` project represents an actual expected state of resources for each environment (a branch corresponds to an environment). See the README.md file in the Git repo for details. 
In short, each branch here is meant to contain a `main.tf` file referring to some version of the blueprints in the `infra/environment-blueprints` project, a set of Terraform files with overrides for any default variables set in the blueprints modules and other utility files like with a list of users allowed to deploy to the environment (such a list is to be checked by the deployments job in the apps projects).\n\n### **Important!**\n\nWhile looking at the implementation keep im mind that this solution deliberately omits some crucial aspects of any project infrastructure like security or monitoring, just for the sake of keeping this solution manageable and comprehensible. Implementing security and monitoring aspects would make the solution cumbersome and much longer to prepare. That is also true for the `k8s-gitops` repository - it's implied that in a real-life solution this would actively participate in the deployment process and hold Kubernetes clusters state in a GitOps approach but currently, this repo is just a placeholder. In the practical guide later you will see a description of the process of controlling environments using different branches in the `infra/environment-set` project. Ideally, such a workflow should use Merge Requests though for simplicity this implementation skips using MRs.\n\nAnother important thing that's possible not clear in this solution is configuration management, i.e. how configuration settings unique to each environment are provided to the applications inside an environment. Well, given that our applications run within Kubernetes cluster and that the cluster state is placed into a dedicated repo (`k8s-gitops` in our case), the configuration settings situation is simple - for each app the Terraform files in the `infra/environment-blueprints` should output all the sensible configuration values for the resources (like S3 bucket names, RDS endpoint URLs, etc.). 
Then, using Terraform itself or some other tool to create/update an environment, an additional step would collect all those outputs, transform them into k8s ConfigMap manifests, and put them into the GitOps repo. \n\nFor the secrets, we can go several ways. The most simplistic (though not flexible and not easy for secret rotation) way is to use some kind of encryption at rest like Mozilla's SOPS so that the secrets are being encrypted when they are put into the GitOps repo and decrypted when deployed into K8s. Another (and better ?) way - do not store secrets at rest at all but use either a third-party tool like Hashicorp Vault (with dynamic secrets generation) or cloud native features like [AWS IAM Roles for Service Accounts](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/).\n\n## Bootstrap the demo\n\nThe accompanying repository, https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app, contains Terraform files that enable you to install a copy of the demo structure into your own GitLab account to see it in action:\n\n`*.tf` files in the root directory and in the `tf_modules` directory describe the structure and configuration of the GitLab projects and groups. In the `repo_content` directory there is a content for the GitLab repositories in the projects. The repositories are filled with those files by the Terraform scripts.\n\nThe demo was tested with GitLab Community Edition `15.0.0-pre revision 4bda1cc84df`. The Terraform scripts do not create any real resources but just imitate them using `null_resource` and `local-exec`.\n\nThe bootstrapping process is conducted inside a container image (see the steps below) so it's platform-agnostic and in terms of tools all you need to spin up the demo is some containerization engine installed on your PC (i.e., Docker, Podman, etc).\n\n**Steps**:\n\n1. 
In the GitLab web UI manually create a root group to bootstrap the demo into (see `root_gitlab_group.tf` for a web-link why it's not possible to automate). Notice its ID - you need to provide it at the next step.\n\n2. Clone this repository.\n    Download an official Hashicorp's Terraform image and enter its interactive shell. All the further commands are supposed to be performed inside that shell:\n    \n    ```\n    docker run --rm -it --name ci-cd-for-a-multi-component-app \\\n      -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n      -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n      -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n      -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n      --entrypoint /bin/sh \\\n      public.ecr.aws/hashicorp/terraform:1.1.9\n    ```\n    \n    Explanation:\n    \n    `-e TF_VAR_gitlab_token=\u003Cyour GitLab account access token>` - Terraform's `gitlab` provider needs a GitLab access token with sufficient permissions to spin up the demo. Provide it as a Bash environment variable - `TF_VAR_gitlab_token` (see `provider.tf`). It is also used by the `upload_avatar` module.\n    \n    `-v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys` - on the left-hand side here specify some directory on your local PC where you would like to store SSH keys needed for deploying the demo. Thus they are persisted even if you exit the container. 
See bullet point `4` for more details.\n    \n    `-e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` and\n    \n    `-e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` - set the names for the aforementioned keys\n    \n    `-v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo` - we mount the project content from your local PC into the running container. Note that because of that the Terraform local state file will be stored inside that directory on your PC.\n\n3. Install tools - bash and curl:\n    \n    ```\n    apk add bash curl\n \n    /bin/bash\n    ```\n\n4. Upon bootstrapping the demo, the repositories' content is pushed into (i.e. is restored) from the `repo_content` directory. (When the demo is destroyed the content of the repositories is automatically pulled (i.e. is saved) into the same directory - probably you dont need this but I implemented that for my convinience during creating the demo.) We need to create an SSH key pair and need it be the same throughout both phases. In this step we generate it:\n    \n    ```ssh-keygen -t rsa -N '' -f /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key \u003C\u003C\u003C y```\n    \n    ```chmod 0400 /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key```\n    \n    A trick used in `tf_modules/gitlab_project_with_restore_backup/main.tf` requires that in the host section of the SSH public key the location of the private key is specified (in a form like `filename@~/.ssh/\u003Cfilename>`). Otherwise the `tf_modules/gitlab_project_with_restore_backup` won't work. 
Edit accordingly:\n    \n    ```sed -i -e 's|^\\(ssh-rsa .*\\) \\(.*\\)$|\\1 ci-cd-for-a-multi-component-app-deploy-key@/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key|' /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub```\n    ```\n\nNow you can proceed with bootstrapping the demo using Terraform:\n\nInitialize Terraform by `terraform init` so it installs all the providers.\n\nDeploy the demo with Terraform by `terraform apply`.\n\n**Notice**: During Terraform execution you may see an error:\n```\nError: POST https://gitlab.com/api/v4/projects/multi-component-app-root-group/ci/library/deploy_keys: 400 {message: {deploy_key.fingerprint_sha256: [has already been taken]}}\n\n```\nI believe this is some glitch in the GitLab API. To fix just run `terraform apply` once again until it shows no errors.\n\nAfter that you should see the following structure in GitLab in the root group:\n\n![gitlab_projects_tree](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/gitlab_projects_tree.png)\n\nAll the projects should be filled with files from the `repo_content` directory.\n\nDo not delete the directory with the cloned project and the files created inside it if later you would want to clean up the things. See the next section for instructions.\n\n## Cleaning up\n\nLaunch a container image the same way you did for bootstrapping the demo (see the previous section). 
It's assumed that you didn't delete any files in `\u003Cpath to a location where to store ssh key-pairs on your PC>` and `\u003Cpath to the directory where you cloned the project into>`: \n\n```\ndocker run --rm -it --name ci-cd-for-a-multi-component-app \\\n  -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n  -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n  -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n  -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n  --entrypoint /bin/sh \\\n  public.ecr.aws/hashicorp/terraform:1.1.9\n```\n\nInstall curl:\n\n```apk add curl```\n\nDo `terraform destroy`.\n\n**Notice**: You may see some errors regarding deleting the `oci-registry` project with OCI images. In that case just delete the images and remove the project manually or wait while GitLab does that itself later.\n\nNow if you want you can remove the cloned project directory and the `\u003Cpath to a location where to store ssh key-pairs on your PC>` directory.\n\nIf you would like to deploy the demo once again without removing the directory with the cloned repo don't forget to remove files created during the previous demo deployment, namely `terraform.tfstate` files in the root directory and `.git` directories everywhere in the `repo_content` directory.\n\nIn the [second part](/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two/) of this tutorial, we'll look at a real-world example of how this can work.\n\n\n\n\n\n",[9,721,917],{"slug":4140,"featured":6,"template":700},"migration-from-atlassian-bamboo-server-to-gitlab-ci","content:en-us:blog:migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","Migration From Atlassian Bamboo Server To Gitlab 
Ci","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"_path":4146,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4147,"content":4153,"config":4159,"_id":4161,"_type":14,"title":4162,"_source":16,"_file":4163,"_stem":4164,"_extension":19},"/en-us/blog/mobile-devops-with-gitlab-part-1",{"title":4148,"description":4149,"ogTitle":4148,"ogDescription":4149,"noIndex":6,"ogImage":4150,"ogUrl":4151,"ogSiteName":685,"ogType":686,"canonicalUrls":4151,"schema":4152},"Mobile DevOps: Code signing with project-level secure files","An introduction to mobile code signing with the new Project-level Secure Files feature.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668629/Blog/Hero%20Images/refargotohp-mzZp_9QpYLc-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-20\",\n      }",{"title":4154,"description":4149,"authors":4155,"heroImage":4150,"date":4156,"body":4157,"category":741,"tags":4158},"Mobile DevOps with GitLab, Part 1 - Code signing with Project-level Secure Files",[913],"2022-09-20","Mobile teams face some unique challenges when it comes to establishing\nDevOps practices. Build tools are different, release and approval cycles\nwith app stores can be slower and introduce more risk, and some applications\nrequire specialized runners. At GitLab, we are focused on finding solutions\nto these challenges to make it easier for [everyone to\ncontribute](/company/mission/#everyone-can-contribute)! 
Starting with mobile\ncode signing.\n\n\nThis post is the first in a series on mobile DevOps and it shows how GitLab\nmakes code signing easier using a new feature called Project-level Secure\nFiles.\n\n\n## A brief introduction to mobile code signing\n\n\nAndroid and iOS projects require special configuration files for secure\napplication code signing to ensure an application on a user's device hasn't\nbeen tampered with. These configuration files can be challenging to manage\nin a [CI environment](/topics/ci-cd/benefits-continuous-integration/).\nKeystores, signing certificates, and provisioning profiles shouldn't be\nstored in version control because they contain sensitive information. These\nfiles are also binary (not text), so they can't easily be stored as CI\nvariables.\n\n\nTo make this process easier, [we've introduced a feature in GitLab 15.0\ncalled Project-level Secure\nFiles](/releases/2022/05/22/gitlab-15-0-released/#project-level-secure-files-in-open-beta).\nThis feature allows these files to be stored securely as part of a GitLab\nproject but outside version control. Secure Files can then easily be loaded\ninto a CI job when it's time to execute the code signing process.\n\n\nGet started by adding a secure file to a project:\n\n\n1. On the top bar, select **Menu > Projects** and find your project.\n\n2. On the left sidebar, select **Settings > CI/CD**.\n\n3. In the **Secure Files** section, select **Expand**.\n\n4. Select **Upload File**.\n\n5. Find the file to upload, select **Open**, and the file upload begins\nimmediately. 
The file shows up in the list when the upload is complete.\n\n\n![Upload Secure\nFile](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/upload-secure-file.png)\n\n\n![List Secure\nFiles](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-1-introducing-project-level-secure-files/list-secure-files.png)\n\n\nWith the files securely stored with the project, the next step is to load\nthem into a [CI/CD](/topics/ci-cd/) job. To use your secure files in a CI/CD\njob, you must use the\n[download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files)\ntool to download the files in the job. After downloading them, these files\ncan be used in any CI job.\n\n\nAdd a command in the script section of your job to download the\ndownload-secure-files tool and execute it. It's also important to specify\nthe download location for the secure files by setting the desired path in\nthe `SECURE_FILES_DOWNLOAD_PATH` [CI/CD\nvariable](https://docs.gitlab.com/ee/ci/variables/index.html).\n\n\nFor example:\n\n\n```\n\ntest:\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './where/files/should/go/'\n  script:\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n```\n\n\nNow, when the CI job runs, all of the secure files will be available in the\nlocation specified. They can then be passed into a build script or loaded\ninto the Apple keychain. \n\n\nThat's it! 
Give it a try, and let us know what you think in the [feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407).\n\n\nNext time we will walk through [how to set up code signing for an Android\napp](/blog/mobile-devops-with-gitlab-part-2/).\n\n\nCover image by \u003Ca\nhref=\"https://unsplash.com/@refargotohp?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">refargotohp\u003C/a>\non \u003Ca\nhref=\"https://unsplash.com/s/photos/mobile-app-building?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>.\n",[999,721,9],{"slug":4160,"featured":6,"template":700},"mobile-devops-with-gitlab-part-1","content:en-us:blog:mobile-devops-with-gitlab-part-1.yml","Mobile Devops With Gitlab Part 1","en-us/blog/mobile-devops-with-gitlab-part-1.yml","en-us/blog/mobile-devops-with-gitlab-part-1",{"_path":4166,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4167,"content":4173,"config":4178,"_id":4180,"_type":14,"title":4181,"_source":16,"_file":4182,"_stem":4183,"_extension":19},"/en-us/blog/mobile-devops-with-gitlab-part-2",{"title":4168,"description":4169,"ogTitle":4168,"ogDescription":4169,"noIndex":6,"ogImage":4170,"ogUrl":4171,"ogSiteName":685,"ogType":686,"canonicalUrls":4171,"schema":4172},"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab","This second part of our tutorial series shows how to use Project-level Secure Files to sign an Android application.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668592/Blog/Hero%20Images/teddy-gr--adWwTRAm1g-unsplash.jpg","https://about.gitlab.com/blog/mobile-devops-with-gitlab-part-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mobile DevOps with GitLab, Part 2 - Code signing for Android with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2022-09-28\",\n      
}",{"title":4168,"description":4169,"authors":4174,"heroImage":4170,"date":4175,"body":4176,"category":741,"tags":4177},[913],"2022-09-28","In Part 1 of this tutorial series, we talked about a new feature in GitLab\ncalled [Project-level Secure\nFiles](/blog/mobile-devops-with-gitlab-part-1/). With\nProject-level Secure Files, you can securely store your build keys as part\nof your project in GitLab, and avoid\n[some](https://www.reddit.com/r/androiddev/comments/a4ydhj/how_to_update_app_when_lost_keystore_file/)\n[painful](https://www.reddit.com/r/gamemaker/comments/v98den/lost_keystore_for_publishing_to_google_play_store/)\n[problems](https://www.reddit.com/r/androiddev/comments/95oa55/is_there_anyway_to_update_my_app_after_having/)\ncaused by lost keystore files.\n\n\nIn this blog post, I'll show you how to create a Keystore file and use it to\nsign an Android application. Then I'll show you how to quickly create a CI\npipeline in GitLab using Project-level Secure Files.\n\n\n## Generate a private signing key\n\n\nThe first thing you'll need is a Keystore file. This file is used to\nsecurely sign the application. You can generate a Keystore file from your\nmachine by running the following command:\n\n\n```\n\nkeytool -genkey -v -keystore release-keystore.jks -alias release -keyalg RSA\n-keysize 2048 -validity 10000\n\n```\n\n\nDuring this process, you'll be asked to create a new password for the\nKeystore file and provide some information about you and your organization.\nSee the example below:\n\n\n![Generate Android\nKeystore](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/generate-keystore.png)\n\n\n\n## Configure your application\n\n\nThe next step is to set some environment variables and update build.gradle\nto add the new signing configuration. 
First, set the following environment\nvariables in either a .env file or in the shell via export.\n\n\n* `ANDROID_KEY_ALIAS` is the alias you gave for the key in the keytool\ncommand above. In this example the value is release.\n\n* `ANDROID_KEYSTORE_PASSWORD` is the new password you supplied to the\nkeytool command above.\n\n* `ANDROID_KEY_STOREFILE` is the path to the new keystore file you just\ncreated. In this example we're using `../release-keystore.jks`.\n\n\nWith the environment variables set, the next step is to update the build\nconfiguration to use the new Keystore in the build process. In the\n`app/build.gradle` file add the following configuration inside the Android\nblock for the release signing config.\n\n\n```\n\nandroid {\n    ...\n    defaultConfig { ... }\n    signingConfigs {\n        release {\n           storeFile file(System.getenv('ANDROID_KEY_STOREFILE'))\n           storePassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n           keyAlias System.getenv('ANDROID_KEY_ALIAS')\n           keyPassword System.getenv('ANDROID_KEYSTORE_PASSWORD')\n        }\n    }\n    buildTypes {\n        release {\n            ...\n            signingConfig signingConfigs.release\n        }\n    }\n}\n\n```\n\n\nSave these changes to the `app/build.gradle file`, and run the build locally\nto ensure everything works. Use the following command to run the build:\n\n\n```\n\n./gradlew assembleRelease\n\n```\n\n\nIf everything worked you'll see a message saying **BUILD SUCCESSFUL**.\n\n\n## Configure project\n\n\nWith the build running locally, it takes just a couple of steps to get it\nrunning in GitLab [CI](/topics/ci-cd/). The first step is to upload your\nKeystore file in GitLab. \n\n\n1. On the top bar, select **Menu > Projects** and find your project.\n\n2. On the left sidebar, select **Settings > CI/CD**.\n\n3. In the **Secure Files** section, select **Expand**.\n\n4. Select **Upload File**.\n\n5. 
Find the file to upload, select **Open**, and the file upload begins\nimmediately. The file shows up in the list when the upload is complete.\n\n\n![Upload Secure\nFile](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/upload-secure-file.png)\n\n\n![List Secure\nFiles](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-secure-files.png)\n\n\nThe next step is to set the CI variables in your project. \n\n\n1. On the top bar, select **Menu > Projects** and find your project.\n\n2. On the left sidebar, select **Settings > CI/CD**.\n\n3. In the **Variables** section, select **Expand**.\n\n4. Create entries for the three environment variables set earlier:\n`ANDROID_KEY_ALIAS`, `ANDROID_KEY_STOREFILE`, `ANDROID_KEYSTORE_PASSWORD`.\n\n\n![List Secure\nFiles](https://about.gitlab.com/images/blogimages/2022-09-19-mobile-devops-with-gitlab-part-2-code-signing-for-android-with-gitlab/list-ci-variables.png)\n\n\n## CI/CD pipelines\n\n\nOnce the project is configured, the final step is to create the build\nconfiguration in the `.gitlab-ci.yml` file. Below is a sample file.\n\n\n```\n\nstages:\n  - build\n\nbuild_android:\n  image: fabernovel/android:api-31-v1.6.1\n  stage: build\n  variables:\n    SECURE_FILES_DOWNLOAD_PATH: './'\n  script:\n    - apt update && apt install -y curl\n    - curl --silent \"https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files/-/raw/main/installer\" | bash\n    - ./gradlew assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/apk/release\n```\n\n\nA few interesting bits from this configuration:\n\n\n1. Image:\n[https://github.com/faberNovel/docker-android](https://github.com/faberNovel/docker-android)\nprovides a collection of prebuilt Docker images that work great for CI\nsystems. 
Find the right version for your project in Docker Hub\n[https://hub.docker.com/r/fabernovel/android/tags](https://hub.docker.com/r/fabernovel/android/tags). \n\n2. Script: Depending on the image, you may need to install curl; the first\nline of the example script installs curl to be used in the second line to\ndownload and execute the\n[download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files)\ntool.\n\n3. Variables: `SECURE_FILES_DOWNLOAD_PATH` tells\n[download-secure-files](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/download-secure-files)\nwhere to download the Keystore file.\n\n4. Artifacts: Make the build output available to be downloaded from the CI\njob, or used in subsequent jobs in the pipeline.\n\n\nCommit the changes to your `.gitlab-ci.yml` file and after you push the\nchanges to GitLab the build will start.\n\n\nTake a look at [this branch in the sample\nproject](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/android_demo/-/tree/basic_build)\nfor reference.\n\n\nGive it a try, and let us know what you think in the [feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/362407). Then, check\nout Part 3, which deals with [code signing for\niOS](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/). 
\n\n\n\n\n_Cover image by  \u003Ca\nhref=\"https://unsplash.com/@teddygr?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Teddy\nGR\u003C/a> on \u003Ca\nhref=\"https://unsplash.com/s/photos/google-phone?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>_\n",[721,999,695,9],{"slug":4179,"featured":6,"template":700},"mobile-devops-with-gitlab-part-2","content:en-us:blog:mobile-devops-with-gitlab-part-2.yml","Mobile Devops With Gitlab Part 2","en-us/blog/mobile-devops-with-gitlab-part-2.yml","en-us/blog/mobile-devops-with-gitlab-part-2",{"_path":4185,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4186,"content":4191,"config":4196,"_id":4198,"_type":14,"title":4199,"_source":16,"_file":4200,"_stem":4201,"_extension":19},"/en-us/blog/modernize-your-ci-cd",{"title":4187,"description":4188,"ogTitle":4187,"ogDescription":4188,"noIndex":6,"ogImage":1440,"ogUrl":4189,"ogSiteName":685,"ogType":686,"canonicalUrls":4189,"schema":4190},"3 CI/CD challenges to consider","If these DevOps challenges hit close to home, the right CI/CD could be the answer.","https://about.gitlab.com/blog/modernize-your-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 CI/CD challenges to consider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-05\",\n      }",{"title":4187,"description":4188,"authors":4192,"heroImage":1440,"date":4193,"body":4194,"category":1040,"tags":4195},[715],"2019-06-05","\n[Continuous integration and delivery](/solutions/continuous-integration/) helps DevOps teams ship higher quality software, faster. But is all [CI/CD](/topics/ci-cd/) created equal? 
What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. Today, we’ll focus on [DevOps](/topics/devops/) challenges and situations where a comprehensive CI/CD approach could be the answer you’ve been looking for.\n\nIf these problems hit a little too close to home, stay tuned for part two where we dive deeper into how these roadblocks impact the rest of the SDLC.\n\n## What challenges do I face?\n\n### 1. Maintenance and integration costs, predominantly human resources costs.\n\nA large percentage of the overall IT budget goes to support teams of engineers needed to integrate and maintain a complex toolchain. An enterprise company with 1,000 developers could need up to 40 engineers just to maintain the DevOps toolchain instead of allocating these resources towards delivering business value.\n\n### 2. Development is slowed/blocked by the operations team.\n\nThe quintessential challenge of the pre-DevOps world is that dev teams are incentivized to increase innovation velocity by shipping new features. Operations teams are incentivized for stability, uptime, and error reduction. The higher the development velocity, the greater the chance for downtime and errors – so these teams are naturally at odds with each other. Dev leaders don’t always have enough enticing evidence or incentive to go to the Ops team to advocate for increased deployment velocity, and vice versa.\n\n### 3. Developers doing ops.\n\nToday, teams and individual developers base the code they produce on the capabilities of their environment rather than the needs of the business.\n\n## What do these look like in practice?\n\n### A big portion of resources and budget goes to undifferentiated integration and maintenance.\n\nTeams are siloed by their tools – each team has their favorite and is optimized to work within these specialized tools only. 
It is difficult to collaborate and troubleshoot across the stack due to a lack of visibility.\n\n### Code sometimes never gets to production at all.\n\nThere is a delay between code being written and driving value. When problems or errors arise and need to be sent back to the developer, it becomes difficult to troubleshoot because the code isn’t fresh in their mind (context switching). They have to stop working on their current project and go back to the previous code to troubleshoot. So much time might have passed that the code is no longer deployable in its current state. In addition to wasting time and money, this is demoralizing for the developer who doesn’t get to see the fruit of their labor.\n\n### Developers worry about environments, not business logic.\n\nEnvironment dependencies and configuration distracts developers from tasks they’re better equipped to handle. They may even be spending time trying to decide what size VM they need to deploy to. In this order “DevOps” means “Developers have to do both dev and ops.” Only a small percentage of developers actually enjoy this arrangement with most asking, “I’m a developer, please stop asking me to do operations.”\n\nIf you’ve already implemented CI/CD but are still experiencing these roadblocks, it might be time to modernize your CI/CD. 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,9,875],{"slug":4197,"featured":6,"template":700},"modernize-your-ci-cd","content:en-us:blog:modernize-your-ci-cd.yml","Modernize Your Ci Cd","en-us/blog/modernize-your-ci-cd.yml","en-us/blog/modernize-your-ci-cd",{"_path":4203,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4204,"content":4210,"config":4216,"_id":4218,"_type":14,"title":4219,"_source":16,"_file":4220,"_stem":4221,"_extension":19},"/en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"title":4205,"description":4206,"ogTitle":4205,"ogDescription":4206,"noIndex":6,"ogImage":4207,"ogUrl":4208,"ogSiteName":685,"ogType":686,"canonicalUrls":4208,"schema":4209},"How to set up multi-account AWS SAM deployments with GitLab CI/CD","Our guest author, an AWS Serverless hero, shares how to automate SAM deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/multi-account-aws-sam-deployments-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to set up multi-account AWS SAM deployments with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Forrest Brazeal\"}],\n        \"datePublished\": \"2019-02-04\",\n      }",{"title":4205,"description":4206,"authors":4211,"heroImage":4207,"date":4213,"body":4214,"category":718,"tags":4215},[4212],"Forrest Brazeal","2019-02-04","I've 
been working with [serverless](/topics/serverless/) applications in AWS\nfor about three years – that makes me an old salt in serverless terms! So I\nknow that deploying and maintaining a serverless app can be tricky; the\ntooling often has critical gaps.\n\n\nAWS's [SAM (Serverless Application\nModel)](https://aws.amazon.com/serverless/sam/) is an open source framework\nthat makes it easier to define AWS resources – such as Lambda functions, API\nGateway APIs and DynamoDB tables – commonly used in serverless applications.\nOnce you lay out your app in a SAM template, the next thing you need is a\nconsistent, repeatable way to get that template off your laptop and deployed\nin the cloud.\n\n\nYou need CI/CD.\n\n\nI've used several different [CI/CD systems](/topics/ci-cd/) to automate SAM\ndeployments, and I always look for the following features:\n\n\n- A single deployment pipeline that can build once and securely deploy to\nmultiple AWS accounts (dev, staging, prod).\n\n- Dynamic feature branch deployments, so serverless devs can collaborate in\nthe cloud without stepping on each other.\n\n- Automated cleanup of feature deployments.\n\n- Review of our SAM application directly integrated with the CI/CD tool's\nuser interface.\n\n- Manual confirmation before code is released into production.\n\n\nIn this post, we'll find out how [GitLab\nCI](/solutions/continuous-integration/) can check these boxes on its way to\ndelivering effective CI/CD for AWS SAM. You can follow along using [the\nofficial example code, available\nhere](https://gitlab.com/gitlab-examples/aws-sam).\n\n\n## Multi-account AWS deployments\n\n\nWe'll want to set up our deployment pipeline across multiple AWS accounts,\nbecause accounts are the only true security boundary in AWS. We don't want\nto run any risk of deploying prod data in dev, or vice versa. 
Our\nmulti-account setup will look something like this:\n\n\nAny time we work with multiple AWS accounts, we need cross-account [IAM\nroles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) in\norder to authorize deployments. We'll handle this task through the following\nsteps. (All referenced scripts are available in the [example\nrepo](https://gitlab.com/gitlab-examples/aws-sam))\n\n\n### 1\\. Establish three AWS accounts for development, staging, and\nproduction deployments\n\n\nYou can use existing AWS accounts if you have them, or [provision new ones\nunder an AWS\nOrganization](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html).\n\n\n### 2\\. Set up GitLab IAM roles in each account\n\n\nRun the following AWS CLI call with admin credentials in each of the three\naccounts:\n\n\n```\n\naws cloudformation deploy --stack-name GitLabCIRoles --template-file\nsetup-templates/roles.yml --capabilities CAPABILITY_NAMED_IAM\n--parameter-overrides CIAccountID=\"\u003CAWS Account ID where your GitLab CI/CD\nrunner lives>\" CIAccountSTSCondition=\"\u003CThe aws:userid for the IAM principal\nused by the Gitlab runner>\"\n  ```\n\nReplace `CIAccountID` and `CIAccountSTSCondition` as indicated with values\nfrom the AWS account where your GitLab CI/CD runner exists. (Need help\nfinding the `aws:userid` for your runner’s IAM principal? Check out [this\nguide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable).)\n\n\nThis CloudFormation template defines two roles: `SharedServiceRole` and\n`SharedDeploymentRole`. The `SharedServiceRole` is assumed by the GitLab\nCI/CD runner when calling the AWS CloudFormation service. This role trusts\nthe GitLab CI/CD runner's role. It has permissions to call the\nCloudFormation service, pass a role via IAM, and access S3 and CloudFront:\nnothing else. 
This role is not privileged enough to do arbitrary AWS\ndeployments on its own.\n\n\nThe `SharedDeploymentRole`, on the other hand, has full administrative\naccess to perform any AWS action. As such, it cannot be assumed directly by\nthe GitLab CI/CD runner. Instead, this role must be \"passed\" to\nCloudFormation using the service's `RoleArn` parameter. The CloudFormation\nservice trusts the `SharedDeploymentRole` and can use it to deploy whatever\nresources are needed as part of the pipeline.\n\n\n### 3\\. Create an S3 bucket for CI artifacts\n\n\nGrab the AWS account ID for each of your development, staging, and\nproduction accounts, then deploy this CloudFormation template **in the\naccount where your GitLab CI/CD Runner exists**:\n\n\n`aws cloudformation deploy --stack-name GitLabCIBucket --template-file\nsetup-templates/ci-bucket.yml --parameter-overrides DevAwsAccountId=\"\u003CAWS\nAccount ID for dev>\" StagingAwsAccountId=\"\u003CAWS Account ID for staging>\"\nProdAwsAccountId=\"\u003CAWS Account ID for prod>\" ArtifactBucketName=\"\u003CA unique\nname for your bucket>\"`\n\n\nThis CloudFormation template creates a centralized S3 bucket which holds the\nartifacts created during your pipeline run. Artifacts are created once for\neach branch push and reused between staging and production. The bucket\npolicy allows the development, test, and production accounts to reference\nthe same artifacts when deploying CloudFormation stacks -- checking off our\n\"build once, deploy many\" requirement.\n\n\n### 4\\. Assume the `SharedServiceRole` before making any cross-account AWS\ncalls\n\nWe have provided the script `assume-role.sh`, which will assume the provided\nrole and export temporary AWS credentials to the current shell. It is\nsourced in the various `.gitlab-ci.yml` build scripts.\n\n\n## Single deployment pipeline\n\n\nThat brings us to the `.gitlab-ci.yml` file you can see at the root of our\nexample repository. 
GitLab CI/CD is smart enough to dynamically create and\nexecute the pipeline based on that template when we push code to GitLab. The\nfile has a number of variables at the top that you can tweak based on your\nenvironment specifics.\n\n\n### Stages\n\n\nOur Gitlab CI/CD pipeline contains seven possible stages, defined as\nfollows:\n\n\n![Multi-account AWS SAM deployment model with GitLab\nCI](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-model.png){:\n.shadow.medium.center}\n\n\n```yaml\n\nstages:\n - test\n - build-dev\n - deploy-dev\n - build-staging\n - deploy-staging\n - create-change-prod\n - execute-change-prod\n```\n\n\n![Deployment lifecycle\nstages](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/deployment-lifecycle-stages.png){:\n.shadow.medium.center}\n\n\n\"Stages\" are used as a control flow mechanism when building the pipeline.\nMultiple build jobs within a stage will run in parallel, but all jobs in a\ngiven stage must complete before any jobs belonging to the next stage in the\nlist can be executed.\n\n\nAlthough seven stages are defined here, only certain ones will execute,\ndepending on what kind of Git action triggered our pipeline. We effectively\nhave three stages to any deployment: a \"test\" phase where we run unit tests\nand dependency scans against our code, a \"build\" phase that packages our SAM\ntemplate, and a \"deploy\" phase split into two parts: creating a\n[CloudFormation change\nset](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)\nand then executing that change set in the target environment.\n\n\n#### Test\n\n\nOur `.gitlab-ci.yml` file currently runs two types of tests: unit tests\nagainst our code, and dependency scans against our third-party Python\npackages.\n\n\n##### Unit tests\n\n\nUnit tests run on every branch pushed to the remote repository. 
This\nbehavior is defined by the `only: branches` property in the job shown below:\n\n\n```yaml\n\ntest:unit:\n stage: test\n only:\n   - branches\n script: |\n   if test -f requirements.txt; then\n       pip install -r requirements.txt\n   fi\n   python -m pytest --ignore=functions/\n```\n\n\nEvery GitLab CI/CD job runs a script. Here, we install any dependencies,\nthen execute Python unit tests.\n\n\n##### Dependency scans\n\n\n[Dependency\nscans](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/),\nwhich can take a few minutes, run only on code pushed to the master branch;\nit would be counterproductive for developers to wait on them every time they\nwant to test code.\n\n\nThese scans use a hardcoded, standard Docker image to mount the code and run\n\"Docker in Docker\" checks against a database of known package\nvulnerabilities. If a vulnerability is found, the pipeline will log the\nerror without stopping the build (that's what the `allow-failure: true`\nproperty does).\n\n\n#### Build\n\n\nThe build stage turns our SAM template into CloudFormation and turns our\nPython code into a valid AWS Lambda deployment package. For example, here's\nthe `build:dev` job:\n\n\n```yaml\n\nbuild:dev:\n stage: build-dev\n \u003C\u003C: *build_script\n variables:\n   \u003C\u003C: *dev_variables\n artifacts:\n   paths:\n     - deployment.yml\n   expire_in: 1 week\n only:\n   - branches\n except:\n   - master\n```\n\n\nWhat's going on here? Note first the combination of `only` and `except`\nproperties to ensure that our development builds happen only on pushes to\nbranches that aren't `master`. 
We're referring to `dev_variables`, the set\nof development-specific variables defined at the top of `.gitlab-ci.yml`.\nAnd we're running a script, pointed to by `build_script`, which packages our\nSAM template and code for deployment using the `aws cloudformation package`\nCLI call.\n\n\nThe artifact `deployment.yml` is the CloudFormation template output by our\npackage command. It has all the implicit SAM magic expanded into\nCloudFormation resources. By managing it as an artifact, we can pass it\nalong to further steps in the build pipeline, even though it isn't committed\nto our repository.\n\n\n#### Deploy\n\nOur deployments use AWS CloudFormation to deploy the packaged application in\na target AWS environment.\n\n\nIn development and staging environments, we use the `aws cloudformation\ndeploy` command to create a change set and immediately execute it. In\nproduction, we put a manual \"wait\" in the pipeline at this point so you have\nthe opportunity to review the change set before moving onto the \"Execute\"\nstep, which actually calls `aws cloudformation execute-changeset` to update\nthe underlying stack.\n\n\nOur deployment jobs use a helper script, committed to the top level of the\nexample repository, called `cfn-wait.sh`. This script is needed because the\n`aws cloudformation` commands don't wait for results; they report success as\nsoon as the stack operation starts. 
To properly record the deployment\nresults in our job, we need a script that polls the CloudFormation service\nand throws an error if the deployment or update fails.\n\n\n## Dynamic feature branch deployments and Review Apps\n\n\n![Dynamic feature branch deployments and Review\nApps](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/dynamic-feature-branch-deployments.png){:\n.shadow.medium.center}\n\n\nWhen a non-master branch is pushed to GitLab, our pipeline runs tests,\nbuilds the [updated source\ncode](/solutions/source-code-management/), and deploys and/or\nupdates the changed CloudFormation resources in the development AWS account.\nWhen the branch is merged into master, or if someone clicks the \"Stop\"\nbutton next to the branch's environment in GitLab CI, the CloudFormation\nstack will be torn down automatically.\n\n\nIt is perfectly possible, and indeed desirable, to have multiple development\nfeature branches simultaneously deployed as live environments for more\nefficient parallel feature development and QA. The serverless model makes\nthis a cost-effective strategy for collaborating in the cloud.\n\n\nIf we are dynamically deploying our application on every branch push, we\nmight like to view it as part of our interaction with the GitLab console\n(such as during a code review). GitLab supports this with a nifty feature\ncalled [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/). 
Review\nApps allow you to specify an \"environment\" as part of a deployment job, as\nseen in our `deploy:dev` job below:\n\n\n```yaml\n\ndeploy:dev:\n \u003C\u003C: *deploy_script\n stage: deploy-dev\n dependencies:\n   - build:dev\n variables:\n   \u003C\u003C: *dev_variables\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   url: https://${CI_COMMIT_REF_NAME}.${DEV_HOSTED_ZONE_NAME}/services\n   on_stop: stop:dev\n only:\n   - branches\n except:\n   - master\n```\n\n\nThe link specified in the `url` field of the `environment` property will be\naccessible in the `Environments` section of GitLab CI/CD or on any merge\nrequest of the associated branch. (In the case of the sample SAM application\nprovided with our example, since we don't have a front end to view, the link\njust takes you to a GET request for the `/services` API endpoint and should\ndisplay some raw JSON in your browser.)\n\n\n![Link to live\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/link-live-environment.png){:\n.shadow.medium.center}\n\n\nThe `on_stop` property specifies what happens when you \"shut down\" the\nenvironment in GitLab CI. This can be done manually or by deleting the\nassociated branch. 
In the case above, we have stopped behavior for dev\nenvironments linked to a separate job called `stop:dev`:\n\n\n```yaml\n\nstop:dev:\n stage: deploy-dev\n variables:\n   GIT_STRATEGY: none\n   \u003C\u003C: *dev_variables\n \u003C\u003C: *shutdown_script\n when: manual\n environment:\n   name: review/$CI_COMMIT_REF_NAME\n   action: stop\n only:\n   - branches\n except:\n   - master\n```\n\n\nThis job launches the `shutdown_script` script, which calls `aws\ncloudformation teardown` to clean up the SAM deployment.\n\n\nFor safety's sake, there is no automated teardown of staging or production\nenvironments.\n\n\n## Production releases\n\n\n![Production\nreleases](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/production-releases.png){:\n.shadow.medium.center}\n\n\nWhen a change is merged into the master branch, the code is built, tested\n(including dependency scans) and deployed to the staging environment. This\nis a separate, stable environment that developers, QA, and others can use to\nverify changes before attempting to deploy in production.\n\n\n![Staging\nenvironment](https://about.gitlab.com/images/blogimages/multi-account-aws-sam/staging-environment.png){:\n.shadow.medium.center}\n\n\nAfter deploying code to the staging environment, the pipeline will create a\nchange set for the production stack, and then pause for a manual\nintervention. A human user must click a button in the Gitlab CI/CD\n\"Environments\" view to execute the final change set.\n\n\n## Now what?\n\n\nStep back and take a deep breath – that was a lot of information! Let's not\nlose sight of what we've done here: we've defined a secure, multi-account\nAWS deployment pipeline in our GitLab repo, integrated tests, builds and\ndeployments, and successfully rolled a SAM-defined serverless app to the\ncloud. Not bad for a few lines of config!\n\n\nThe next step is to try this on your own. 
If you'd like to start with our\nsample \"AWS News\" application, you can simply run `sam init --location\ngit+https://gitlab.com/gitlab-examples/aws-sam` to download the project on\nyour local machine. The AWS News app contains a stripped-down,\nsingle-account version of the `gitlab-ci.yml` file discussed in this post,\nso you can try out deployments with minimal setup needed.\n\n\n## Further reading\n\n\nWe have barely scratched the surface of GitLab CI/CD and AWS SAM in this\npost. Here are some interesting readings if you would like to take your work\nto the next level:\n\n\n### SAM\n\n\n- [Implementing safe AWS Lambda deployments with AWS SAM and\nCodeDeploy](https://aws.amazon.com/blogs/compute/implementing-safe-aws-lambda-deployments-with-aws-codedeploy/)\n\n- [Running and debugging serverless applications locally using the AWS SAM\nCLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-test-and-debug.html)\n\n\n### GitLab CI\n\n\n- [Setting up a GitLab Runner on\nEC2](https://hackernoon.com/configuring-gitlab-ci-on-aws-ec2-using-docker-7c359d513a46)\n\n- [Scheduled\npipelines](https://docs.gitlab.com/ee/ci/pipelines/schedules.html)\n\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\n\nPlease [let me know](https://twitter.com/forrestbrazeal) if you have further\nquestions!\n\n\n### About the guest author\n\n\nForrest Brazeal is an [AWS Serverless\nHero](https://aws.amazon.com/developer/community/heroes/forrest-brazeal/).\nHe currently works as a senior cloud architect at\n[Trek10](https://trek10.com), an AWS Advanced Consulting Partner. 
You can\n[read more about Trek10's GitLab journey here](/customers/trek10/).\n",[9,896,232,827,1042,763],{"slug":4217,"featured":6,"template":700},"multi-account-aws-sam-deployments-with-gitlab-ci","content:en-us:blog:multi-account-aws-sam-deployments-with-gitlab-ci.yml","Multi Account Aws Sam Deployments With Gitlab Ci","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci.yml","en-us/blog/multi-account-aws-sam-deployments-with-gitlab-ci",{"_path":4223,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4224,"content":4230,"config":4235,"_id":4237,"_type":14,"title":4238,"_source":16,"_file":4239,"_stem":4240,"_extension":19},"/en-us/blog/notification-on-pipeline-succeeds",{"title":4225,"description":4226,"ogTitle":4225,"ogDescription":4226,"noIndex":6,"ogImage":4227,"ogUrl":4228,"ogSiteName":685,"ogType":686,"canonicalUrls":4228,"schema":4229},"Notification emails when pipelines are fixed","How to turn on/off Notifications for when pipelines are fixed","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681358/Blog/Hero%20Images/pipeline_success_unsplash.jpg","https://about.gitlab.com/blog/notification-on-pipeline-succeeds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Notification emails when pipelines are fixed\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-06-17\",\n      }",{"title":4225,"description":4226,"authors":4231,"heroImage":4227,"date":4232,"body":4233,"category":978,"tags":4234},[892],"2020-06-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nGitLab can send users a notification when a pipeline fails so that they can respond to the failure quickly. Now, GitLab can also send a [notification to inform users when the pipeline has been fixed](https://gitlab.com/gitlab-org/gitlab/-/issues/24309). 
This new notification eliminates the need for developers to constantly monitor the status of pipeline builds. This lets developers make better use of their precious time.\n\nWe are gradually rolling out this feature in order to test it out. During this testing period, users will start getting notifications for fixed pipelines.\n\nExample email:\n\n![Sample email pipeline fixed](https://about.gitlab.com/images/blogimages/email_notification.png)\n\nTo disable the setting for a particular project only:\n\n1. Go to the **Project overview** page for the project.\n1. Click the **\"bell\"** (Notification setting) button and select **Custom**.\n1. Uncheck the **Fixed pipeline** checkbox.\n\n![Instructions for disabling notification](https://about.gitlab.com/images/blogimages/disable_notification.png)\n\nTo disable the notification for all projects:\n\n1. In the top right, click your user icon and choose **Profile**.\n1. In the top right of the next page, click the **\"pencil\"** (Edit Profile) button.\n1. On the left, click Notifications.\n1. Expand the **Global notification level** drop-down box, and select **Custom**.\n1. Uncheck the Fixed pipeline checkbox.\n\n![Instructions for disabling notification](https://about.gitlab.com/images/blogimages/check_fix_pipeline.png)\n\nIf our tests are successful, this feature will remain active and be enabled by default. 
If needed, you can disable these notifications if they are not useful for you.\n\nCover image by [Austin Distel](https://images.unsplash.com/photo-1563986768609-322da13575f3?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1650&q=80) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9],{"slug":4236,"featured":6,"template":700},"notification-on-pipeline-succeeds","content:en-us:blog:notification-on-pipeline-succeeds.yml","Notification On Pipeline Succeeds","en-us/blog/notification-on-pipeline-succeeds.yml","en-us/blog/notification-on-pipeline-succeeds",{"_path":4242,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4243,"content":4249,"config":4253,"_id":4255,"_type":14,"title":4256,"_source":16,"_file":4257,"_stem":4258,"_extension":19},"/en-us/blog/offline-environments",{"title":4244,"description":4245,"ogTitle":4244,"ogDescription":4245,"noIndex":6,"ogImage":4246,"ogUrl":4247,"ogSiteName":685,"ogType":686,"canonicalUrls":4247,"schema":4248},"Running Security Scans in Limited Connectivity and Offline Environments","GitLab Security Scans can ensure Security in the most locked down environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666484/Blog/Hero%20Images/steven-kamenar-MMJx78V7xS8-unsplash.jpg","https://about.gitlab.com/blog/offline-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Running Security Scans in Limited Connectivity and Offline Environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-10-01\",\n      }",{"title":4244,"description":4245,"authors":4250,"heroImage":4246,"date":1776,"body":4251,"category":978,"tags":4252},[1775],"\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIt’s possible to run most of the GitLab security scanners when not connected to the internet.\nGitLab scanners generally will connect to the internet to download the 
latest sets of signatures,\nrules, and patches. A few extra steps are necessary to configure the tools to function properly by\nusing resources available on your local network.\n\nThe security scans which are supported in [Offline Environments](https://docs.gitlab.com/ee/user/application_security/offline_deployments/) are as follows:\n\n- Container Scanning\n- SAST\n- DAST\n- License Compliance\n- Dependency Scanning\n\nWatch this short video (3 minutes) to learn how to setup GitLab Security Scans in Offline Environments.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/FoLmRvTcOAY\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Steven Kamenar](https://unsplash.com/@skamenar) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9,697],{"slug":4254,"featured":6,"template":700},"offline-environments","content:en-us:blog:offline-environments.yml","Offline Environments","en-us/blog/offline-environments.yml","en-us/blog/offline-environments",{"_path":4260,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4261,"content":4267,"config":4273,"_id":4275,"_type":14,"title":4276,"_source":16,"_file":4277,"_stem":4278,"_extension":19},"/en-us/blog/one-billion-pipelines-cicd",{"title":4262,"description":4263,"ogTitle":4262,"ogDescription":4263,"noIndex":6,"ogImage":4264,"ogUrl":4265,"ogSiteName":685,"ogType":686,"canonicalUrls":4265,"schema":4266},"Beyond source code management: 1 billion pipelines of CI/CD innovation","GitLab recently reached a major CI/CD milestone - find out what this means for customer innovation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668799/Blog/Hero%20Images/securitylifecycle.png","https://about.gitlab.com/blog/one-billion-pipelines-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Beyond source code management: 1 billion pipelines of CI/CD innovation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jackie Porter\"}],\n        \"datePublished\": \"2023-10-04\",\n      }",{"title":4262,"description":4263,"authors":4268,"heroImage":4264,"date":4270,"body":4271,"category":1062,"tags":4272},[4269],"Jackie Porter","2023-10-04","\nOur DevSecOps journey began with a clear vision: to empower developers and organizations with a unified platform for simplifying the software development lifecycle. Today, GitLab enables thousands of organizations to accelerate value delivery by going beyond source code management and enhancing the CI/CD capabilities of our AI-powered DevSecOps platform. We recently achieved a significant milestone demonstrating how this innovation drives customer success – 1 billion pipelines have now run on GitLab's SaaS-based DevSecOps Platform.\n\nFrom healthcare to finance, e-commerce to education, our platform has become the backbone of [digital transformation journeys worldwide](http://about.gitlab.com/customers). Every day, more and more customers like [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), [Carfax](https://about.gitlab.com/customers/carfax/), [Hackerone](https://about.gitlab.com/customers/hackerone/), and [Deutsche Telekom](https://about.gitlab.com/customers/deutsche-telekom/) are benefitting from GitLab's CI/CD, which automates the building, testing, packaging, securing, and deploying of code, starting at their first commit. With GitLab, they deliver better code and faster releases – fewer bugs and more time spent on new features.\n\n> Test-drive CI/CD today with a [free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).\n\n## Customer-driven innovation\nGitLab delivers customer-driven innovation through collaboration and contributions from the community and customers. 
In addition, as [GitLab is developed using GitLab](https://about.gitlab.com/handbook/engineering/development/principles/#dogfooding), we are able to identify and tackle the same issues our customers face. Let’s look at some of the critical advancements in GitLab CI/CD.\n\n### Dealing with time-to-market pressures\n\nTime-to-market is critical in today's fast-paced economic environment. GitLab's CI/CD pipelines accelerate the software delivery process, enabling organizations to respond swiftly to market demands. By incorporating artificial intelligence (AI) across the software development lifecycle, GitLab helps organizations improve productivity, enabling them to develop, secure, and deploy software even faster. Our new [GitLab Duo AI](https://about.gitlab.com/gitlab-duo/) capabilities further improve productivity and efficiency, including:\n* Code Suggestions, which helps with faster code creation\n* Suggested Reviewers, which expedites code reviews and approvals\n* Vulnerability summary, which aids with rapid vulnerability remediation\n* Value stream forecasting, which predicts future team efficiency\n\n_“Time to market was a big issue for us. Before our transformation to Agile and DevOps started, we had release cycles of nearly 18 months in some cases. We've been able to dramatically reduce that to roughly 3 months.\" Thorsten Bastian, Business Owner IT, CI/CD Hub, Telekom IT, [Deutsche Telekom](https://about.gitlab.com/customers/deutsche-telekom/)_\n\nGet to know GitLab's CI/CD capabilities with this demo.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n### Facing security vulnerabilities head-on\n\nSecurity is seamlessly integrated into the CI/CD pipeline within the natural workflow of developers, enabling them to detect vulnerabilities early. 
Security scans, including static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)), are incorporated directly into the CI/CD pipeline, helping to ensure that every release is secure by design. Security checks become integral to the development process, reducing the risk of vulnerabilities delaying releases. GitLab enables compliance teams to apply relevant controls and governance frameworks. Recently, we launched new capabilities to centralize policy management, expand reports and controls, and enhance our compliance dashboards.\n\n_“GitLab is helping us catch security flaws early and it's integrated it into the developer's flow. An engineer can push code to GitLab CI, get that immediate feedback from one of many cascading audit steps, and see if there's a security vulnerability built in there, and even build their own new step that might test a very specific security issue.” Mitch Trale, Head of Infrastructure, [HackerOne](https://about.gitlab.com/customers/hackerone/)_\n\nLearn how to use vulnerability management tools in your environment with this demo.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/8SJHz6BCgXM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n### Dealing with developer productivity and toolchain complexity\nGitLab helps organizations build a framework for platform engineering to create golden paths to standardize, scale, and secure workflows.\n\nEstablishing these golden paths helps combat cognitive overload and the trend of \"you build it, you run it,\" which have taken a toll on developer productivity and happiness. 
Golden paths also support consistent application of policies across the organization, addressing the challenges that arise when [different teams use different processes and tools](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/). GitLab includes capabilities like templates, inheritance rules, infrastructure as code, and remote development that benefit DevSecOps teams by reducing time to onboard new developers, improving workflow efficiency and collaboration, and supporting workspace flexibility.\n\n_“It seems that everything is just cleaner now when moving code to production. We’re putting out more new product features because teams are spending more time creating code than making sure their pipelines are running. When we go to commonize our CI/CD pipelines, we can move them (workloads to the cloud) with a common on-ramp that makes it easier.” Mark Portofe, Director of Platform Engineering, [CARFAX](https://about.gitlab.com/customers/carfax/)_\n\nHear Mark Portofe from CARFAX walk through their journey of establishing golden paths to improve developer productivity with GitLab.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/853193701?h=1c829eb7b7\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n## Looking ahead\nGitLab remains committed to pushing the boundaries of what's possible in CI/CD. We continue to innovate and provide you with the tools and capabilities you need to stay ahead in a rapidly evolving tech landscape. 
Here are some of the key capabilities we are excited to roll out in the coming months:\n\n* [CI/CD catalog](https://about.gitlab.com/blog/introducing-ci-components/) to create discoverable, shareable, and accessible building blocks that promote reuse and innersourcing and support scalable DevSecOps processes\n* [Customizable roles](https://about.gitlab.com/blog/expanding-guest-capabilities-in-gitlab-ultimate/) to implement customizable separation of duties policies\n* [AI capabilities](https://about.gitlab.com/blog/modern-software-development-problems-require-modern-ai-powered-devsecops/) like Code Suggestions extended to self-managed deployments and GitLab Duo Chat to provide a context-aware assistant for developers to enhance their productivity\n* [Comprehensive and centralized policy management](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/) that combines the flexibility of compliance pipelines with the user experience of scan execution policies into a single solution\n\nGitLab was named a Leader in the 2023 Gartner® Magic Quadrant™ for DevOps Platforms. We believe this recognizes our role in helping customers streamline their software delivery process and deliver software faster. [Download the report](http://about.gitlab.com/gartner-magic-quadrant) to learn more.\n\nYou can try CI/CD today with a [free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).\n\n_Gartner, Magic Quadrant for DevOps Platforms, Manjunath Bhat, Thomas Murphy, Et al., 05 June 2023. GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally, and MAGIC QUADRANT is a registered trademark of Gartner, Inc. and/or its affiliates and are used herein with permission. All rights reserved. Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. 
Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\n",[9,1062,720],{"slug":4274,"featured":6,"template":700},"one-billion-pipelines-cicd","content:en-us:blog:one-billion-pipelines-cicd.yml","One Billion Pipelines Cicd","en-us/blog/one-billion-pipelines-cicd.yml","en-us/blog/one-billion-pipelines-cicd",{"_path":4280,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4281,"content":4287,"config":4293,"_id":4295,"_type":14,"title":4296,"_source":16,"_file":4297,"_stem":4298,"_extension":19},"/en-us/blog/online-retailer-bol-tackles-growing-compliance-needs-with-gitlab",{"title":4282,"description":4283,"ogTitle":4282,"ogDescription":4283,"noIndex":6,"ogImage":4284,"ogUrl":4285,"ogSiteName":685,"ogType":686,"canonicalUrls":4285,"schema":4286},"Online retailer bol tackles growing compliance needs with GitLab","Learn how GitLab helps the major international company adhere to regulations while increasing development efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665465/Blog/Hero%20Images/blog-image-template-1800x945__15_.png","https://about.gitlab.com/blog/online-retailer-bol-tackles-growing-compliance-needs-with-gitlab","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Online retailer bol tackles growing compliance needs with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julie Griffin\"}],\n        \"datePublished\": \"2024-06-12\",\n      }",{"title":4282,"description":4283,"authors":4288,"heroImage":4284,"date":4290,"body":4291,"category":1288,"tags":4292},[4289],"Julie Griffin","2024-06-12","[Bol](https://www.bol.com/nl/nl/), which uses GitLab Ultimate, is one of the largest online retailers in the Netherlands and Belgium. The company offers a product range of 38 million items alongside 50,000 sales partners who sell their goods on its marketplace. Bol relies on innovative technology to increase development efficiency, adhere to compliance regulations, and maintain trust across its extensive customer base.\n\nBol equips its teams with the GitLab DevSecOps platform, enabling its developers to quickly and securely ship projects, while saving the team thousands of manual hours on compliance checks.\n\n“GitLab is helping us stay flexible and competitive as we grow, and as the requirements that our software and our developers need to comply with grow,” says Guus Houtzager, engineering manager on bol’s Continuous Integration and Continuous Deployment team. “That's the biggest challenge that we had and we tackled it with GitLab.”\n\nHowever, as bol's revenue grew, so did the compliance rules and regulations it had to adhere to. 
The company needs to continually adapt its software to meet strict, and often updated regulations, such as the General Data Protection Regulation (GDPR), International Organization for Standardization (ISO) requirements, and the EU Artificial Intelligence Act.\n\nAfter adopting GitLab Community in 2016 and GitLab Premium several years later, bol upgraded to GitLab Ultimate in 2024 to [meet the growing compliance load](https://about.gitlab.com/solutions/security-compliance/) and help its teams tackle projects faster and more efficiently.\n\n![Guus Houtzager of bol - quote box](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675638/Blog/Content%20Images/bol_Blog_-_Guus.png)\n\n## Saving thousands of developer hours per month \n\nGitLab enables bol’s DevSecOps teams to set up policies that automate compliance configurations and checks. This helps them achieve consistency and scalability in their compliance efforts, and reduce the risk of human error. With compliance guardrails in place, its team of 850 developers can focus more of their energy on creating innovative, secure software.\n\n“We bought GitLab Ultimate so we can have compulsory compliance pipelines that ensures our teams are working within compliance regulations from the start,” says Houtzager.\n\nBy allowing developers to focus on coding without the burden of compliance regulations, the bol development team dramatically increased its efficiency.\n\n“This has saved our developers several thousands of hours in total per month,” says Houtzager.\n\nIn addition to time savings, the team is now confident it can handle any compliance roadblocks that come its way.\n\n“We know that GitLab is going to help us with compliance and software security,” says Houtzager. “Even if we get new regulations, we have a toolkit, through GitLab, that enables us to follow and comply with any new regulations. 
We don't know exactly what will happen, but we know we are in a position to handle whatever comes our way.”\n\n## Shifting left to protect customers and its business\n\nAs a large player in the European retail world, trust is a key pillar of bol’s business model. The company handles a large quantity of personal data, such as addresses and order details. While regulatory fines are a concern, so is maintaining trust with its customer base. That only emphasizes the importance of security.\n\n“Most of the people in the Netherlands and Belgium have bought something from us in the past and people trust us,” says Houtzager. “They trust that we handle their payment details properly. We don't sell your Personal Identifiable Information PII data, and they trust us to keep it safe and secure.”\n\nTo protect customer data and its business, bol shifted security left, enabling developers to find errors and vulnerabilities earlier in the development process. However, shifting left without the right tools in place could lead to developers spending countless hours trying to correct any problems they find.\n\n“If you shift left without also providing teams the tools, support, and processes to make sure that they can do this work in an efficient manner, teams get bogged down in either procedures or manual work,” says Houtzager.\n\nWith GitLab Ultimate, bol is able to set up the layout and permission model to meet the company’s security requirements, giving developers the freedom to quickly build and ship projects while protecting customer and business data. The DevSecOps platform has the added benefit of tracking the changes and fixes that developers make and noting them in compliance records. \n\n## Looking ahead to AI\n\nMoving forward, bol plans to use more GitLab Ultimate features, like cloud integration, and artificial intelligence (AI) capabilities, along with even more security features. 
\n\nFrom building secure software faster to improving the developer experience, bol looks forward to one day using AI-powered [GitLab Duo](https://about.gitlab.com/gitlab-duo/) to help them scale their software development. \n\n“The situation must be right for us to be able to use it and then we will definitely take a look at how it can help us,” says Houtzager. “We, like everybody else, are looking at where AI can help us to improve situations across the entire software development life cycle. So if someone is building code, how can it help them? If someone is working on other aspects of the process, how can it help them?” \n\n> Read more customer stories on [the GitLab customers page](https://about.gitlab.com/customers/).\n",[9,697,720,495],{"slug":4294,"featured":6,"template":700},"online-retailer-bol-tackles-growing-compliance-needs-with-gitlab","content:en-us:blog:online-retailer-bol-tackles-growing-compliance-needs-with-gitlab.yml","Online Retailer Bol Tackles Growing Compliance Needs With Gitlab","en-us/blog/online-retailer-bol-tackles-growing-compliance-needs-with-gitlab.yml","en-us/blog/online-retailer-bol-tackles-growing-compliance-needs-with-gitlab",{"_path":4300,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4301,"content":4307,"config":4313,"_id":4315,"_type":14,"title":4316,"_source":16,"_file":4317,"_stem":4318,"_extension":19},"/en-us/blog/open-sourcing-the-gitter-mobile-apps",{"title":4302,"description":4303,"ogTitle":4302,"ogDescription":4303,"noIndex":6,"ogImage":4304,"ogUrl":4305,"ogSiteName":685,"ogType":686,"canonicalUrls":4305,"schema":4306},"Open-sourcing the Gitter mobile apps","Learn how we open sourced the Android and iOS Gitter apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/open-sourcing-the-gitter-mobile-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Open-sourcing the Gitter mobile apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Eastwood\"}],\n        \"datePublished\": \"2019-11-22\",\n      }",{"title":4302,"description":4303,"authors":4308,"heroImage":4304,"date":4310,"body":4311,"category":718,"tags":4312},[4309],"Eric Eastwood","2019-11-22","Before we acquired Gitter most every part of Gitter was\nprivate/closed-source. The main\n[webapp](https://gitlab.com/gitlab-org/gitter/webapp) was open-sourced in\nJune 2017 and got both mobile\n[Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app)\napps open sourced in September 2018. If you would like to come help out,\nfeel free to send us a merge request! This blog post will go over some the\ntechnical details of making the projects available for anyone to contribute.\n\n\nHere is the basic overview:\n\n\n1.  Find secrets in the current state of the project (don't worry about the\ncommit history) and move to some config that isn't tracked in the repo.\n\n1.  Find/remove secrets throughout the whole repo commit history.\n\n1.  Make the project public 🎉\n\n1.  Caveats:\n    - Because we are rewriting the git history, I don't know of a way to keep merge requests/pull requests because the MRs reference the old commit hashes.\n\nQuick navigation:\n\n\n- [Jump to open sourcing Android](#android)\n\n- [Jump to open sourcing iOS](#ios)\n\n\n## Android\n\n\nIf you want to check out the full project and final result, you can check\nout the [project on\nGitLab](https://gitlab.com/gitlab-org/gitter/gitter-android-app)\n([open-sourced\n2018-8-8](https://twitter.com/gitchat/status/1027293167471812611)).\n\n\nTo start out, we used the [GitHub to GitLab project\nimport](https://docs.gitlab.com/ee/user/project/import/github.html) to move\nthe private GitHub project over to GitLab. 
We named it `gitter-android-app2`\nso that later on we could create the actual clean public project without any\nof the orphaned git references that may potentially leak.\n\n\n### Finding secrets\n\n\n[`truffleHog`](https://github.com/dxa4481/truffleHog) will search for high\nentropy strings (like tokens/passwords) through the entire git repo history.\nIt's also useful to find all the potential areas where secrets may still\nexist in the current state of the project. Some sticky points we encountered\nwhile using include:\n\n\n- \"I wish we could just search the current state of the project instead of\nall git history (the `--max_depth=2` argument will just make it search the\ndiff of the latest commit)\"\n[dxa4481/truffleHog#92](https://github.com/dxa4481/truffleHog/issues/92).\n\n- \"The output will show the entire diff for the triggered commit which is a\nbit burdensome to see exactly what is wrong. The JSON output `--json` is\nsometimes easier to understand\"\n[https://github.com/dxa4481/truffleHog/issues/58](https://github.com/dxa4481/truffleHog/issues/58)\nor\n[dxa4481/truffleHog#102](https://github.com/dxa4481/truffleHog/issues/102).\n\n\n### Moving secrets to untracked config\n\n\nOnce we figure out where all of the secrets are we need a config/variable\nsolution that isn't tracked by git but still lets them be available when\nbuilding. We also wanted the solution to work in GitLab CI for some sanity\nbuilds/testing. 
There are lots of good articles on this topic:\n\n\n- [Remove private signing information from your\nproject](https://developer.android.com/studio/build/gradle-tips#remove-private-signing-information-from-your-project)\n\n- [Keeping Your Android Project’s Secrets\nSecret](https://medium.com/@geocohn/keeping-your-android-projects-secrets-secret-393b8855765d)\n\n- [Hiding Secrets in Android\nApps](https://rammic.github.io/2015/07/28/hiding-secrets-in-android-apps/)\n\n- [Keeping secrets in an Android\nApplication](https://joshmcarthur.com/2014/02/16/keeping-secrets-in-an-android-application.html)\n\n- [Android: Loading API Keys and other secrets from properties file using\ngradle](https://gist.github.com/curioustechizen/9f7d745f9f5f51355bd6)\n\n- [How can I keep API keys out of source\ncontrol?](https://arstechnica.com/information-technology/2013/12/how-can-i-keep-api-keys-out-of-source-control/)\n\n\nOur solution is completely based on the information in these articles. We\nchose to go the route of defining things in a `secrets.properties` file\nwhich can easily be read in the Gradle build script which handles the build\neven when using Android Studio. 
If the `secrets.properties` file doesn't\nexist (like in CI), it will try to read the secrets from [environment\nvariables which can easily be supplied in the project\nsettings](https://docs.gitlab.com/ee/ci/variables/).\n\n\n`secerts.properties`\n\n\n```properties\n\n# Visit https://developer.gitter.im/apps (sign in) and create a new app\n\n# Name: my-gitter-android-app (can be anything)\n\n# Redirect URL: https://gitter.im/login/oauth/callback\n\noauth_client_id=\"...\"\n\noauth_client_secret=\"...\"\n\noauth_redirect_uri=\"https://gitter.im/login/oauth/callback\"\n\n```\n\n\n`build.gradle`\n\n\n```gradle\n\napply plugin: 'com.android.application'\n\n\n// Try reading secrets from file\n\ndef secretsPropertiesFile = rootProject.file(\"secrets.properties\")\n\ndef secretProperties = new Properties()\n\nif (secretsPropertiesFile.exists()) {\n    secretProperties.load(new FileInputStream(secretsPropertiesFile))\n}\n\n// Otherwise read from environment variables, this happens in CI\n\nelse {\n    secretProperties.setProperty(\"oauth_client_id\", \"\\\"${System.getenv('oauth_client_id')}\\\"\")\n    secretProperties.setProperty(\"oauth_client_secret\", \"\\\"${System.getenv('oauth_client_secret')}\\\"\")\n    secretProperties.setProperty(\"oauth_redirect_uri\", \"\\\"${System.getenv('oauth_redirect_uri')}\\\"\")\n}\n\n\nandroid {\n    ...\n\n    defaultConfig {\n        ...\n\n        buildConfigField(\"String\", \"oauth_client_id\", \"${secretProperties['oauth_client_id']}\")\n        buildConfigField(\"String\", \"oauth_client_secret\", \"${secretProperties['oauth_client_secret']}\")\n        buildConfigField(\"String\", \"oauth_redirect_uri\", \"${secretProperties['oauth_redirect_uri']}\")\n    }\n    ...\n}\n\n```\n\n\nUse the config variables in the Java app:\n\n\n```java\n\nimport im.gitter.gitter.BuildConfig;\n\n\nBuildConfig.oauth_client_id;\n\nBuildConfig.oauth_client_secret;\n\nBuildConfig.oauth_redirect_uri;\n\n```\n\n\n#### Removing compiled assets\n\n\nWe 
use a `WebView` to display the HTML markdown messages in the chat room.\nThis view uses assets built from the main [`webapp`\nproject](https://gitlab.com/gitlab-org/gitter/webapp). Because these assets\nhad some inlined production\n[`webapp`](https://gitlab.com/gitlab-org/gitter/webapp) secrets that whole\ndirectory needed to be removed.\n\n\nInitially, we opted to have the developer build these assets with their own\nsecrets and symlink the build output directory. The [community made this\neven\nsimpler](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/113),\nso now there is just a Gradle task to run which fetches the latest build we\nhave available from the `webapp` GitLab CI.\n\n\n### Removing secrets from the repo history\n\n\nFrom your `truffleHog` results earlier, you should know where secrets were\nstored throughout the history. We can use [BFG\nRepo-Cleaner](https://rtyley.github.io/bfg-repo-cleaner/) to remove and\nrewrite the repo history quickly.\n\n\nWhen using BFG, I wanted just to rewrite all of the sensitive values in\n`app/src/main/res/values/settings.xml` instead of completely removing them,\nbut rewriting isn't an option with BFG so I went ahead with deleting it and\nrecreated it in a commit afterwards. 🤷\n\n\nFor the Android app, here are the BFG commands I used,\n\n\n- Remove `app/src/main/assets/www/`\n  - `java -jar \"bfg.jar\" --delete-folders www`\n- Remove `app/src/main/res/values/settings.xml`\n  - `java -jar \"bfg.jar\" --delete-files settings.xml`\n- Remove sensitive strings where we can't just remove the whole file\n(collected from `truffleHog` results)\n  - `java -jar \"bfg.jar\" --replace-text \"gitter-android-bad-words.txt\"`\n\nAfter you think you removed all the secrets, it's best to run `truffleHog`\nagain just to make sure no secrets are leftover. 
😉\n\n\n### Make it public\n\n\nNow it's time to update your `readme` with some setup instruction so the\ncommunity knows how to contribute.\n\n\nThis is the scary part 😅. Go to **Project settings** > **General** >\n**Permissions** > set **Project visibility** as **Public**. You can [read\nmore about project access\nhere](https://docs.gitlab.com/ee/public_access/public_access.html).\n\n\nCurious about how to setup builds in GitLab CI? [Learn more from this blog\npost](/blog/setting-up-gitlab-ci-for-android-projects/), which was what we\nused to set it up for our projects.\n\n\nYou can even learn how we [automated the release process so we can publish\nstraight to the Google Play Store from GitLab CI via fastlane\n🚀](/blog/android-publishing-with-gitlab-and-fastlane/).\n\n\n## iOS\n\n\nIf you want to see the full project and final result, you can check out the\n[project on GitLab](https://gitlab.com/gitlab-org/gitter/gitter-ios-app)\n([open-sourced\n2018-9-18](https://twitter.com/gitchat/status/1041795909103898625)).\n\n\nThe same concepts apply from the Android section. We create a separate\nprivate project, `gitter-ios-app2`, where we can work and later on, we can\ncreate the actual clean public project(`gitter-ios-app`) without any of the\norphaned git references that could leak.\n\n\n### Finding secrets\n\n\n`truffleHog` didn't work well in the iOS project because there was a bunch\nof generated XCode files that had file hashes (high entropy strings which\ntruffleHog looks for) – which meant every commit was listed. 🤦‍ Instead of\ntrying to find something to filter the results down or get another tool, I\ndecided just search manually. 
Here is the list of things we looked for:\n\n\n- `token`\n\n- `secret`\n\n- `key`\n\n- `cert`\n\n- `api`\n\n- `pw`\n\n- `password`\n\n\nI used this directory filter when `Ctrl + f` those strings above to avoid\nfinding things outside of the repo itself (copy-paste for Atom editor):\n`!Common/,!Libraries,!Gitter/www,!Pods/,!xctool`\n\n\n### Moving secrets to untracked config\n\n\nThe iOS app uses a few git sub-modules which we also had to check for\nsecrets before making them public. It turned out only one of the sub-modules\n–\n[`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon)\n– had secrets of it's own so I ran through the same secret removal process.\n\n\nWe had the same OAuth secrets in the main part of the iOS app, but since\n`troupeobjccommon` was also trying to handle OAuth secret settings, we opted\nfor putting the new logic in `troupeobjccommon` to avoid having to refactor\nwhatever other downstream code that uses the same submodule (like the macOS\ndesktop app).\n\n\nHere are some articles around handling secrets in an iOS project,\n\n\n- [Secret variables in Xcode AND your CI for fun and profit\n💌](https://medium.com/flawless-app-stories/secret-variables-in-xcode-and-your-ci-for-fun-and-profit-d387a50475d7)\n\n- [Secrets Management in iOS\nApplications](https://medium.com/@jules2689/secrets-management-in-ios-applications-52795c254ec1)\n\n\nSince iOS apps can only be built on macOS and we don't have any macOS GitLab\nCI runners, our solution doesn't have to be CI compatible. 
You can track\n[this issue for shared macOS GitLab CI\nrunners](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/5720).\n\n\n`Gitter/GitterSecrets-Dev.plist`\n\n\n```xml\n\n\u003C?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n\u003C!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\"\n\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n\n\u003Cplist version=\"1.0\">\n\n\u003Cdict>\n  \u003C!--\n  Visit https://developer.gitter.im/apps (sign in) and create a new app\n  Name: my-gitter-ios-app (can be anything)\n  Redirect URL: https://gitter.im/login/oauth/callback\n  -->\n  \u003Ckey>OAuthClientId\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthClientSecret\u003C/key>\n  \u003Cstring>\u003C/string>\n  \u003Ckey>OAuthCallback\u003C/key>\n  \u003Cstring>https://gitter.im/login/oauth/callback\u003C/string>\n\u003C/dict>\n\n\u003C/plist>\n\n```\n\n\n[`troupeobjccommon`](https://gitlab.com/gitlab-org/gitter/troupeobjccommon)\nis in Objective-C\n\n\n`TRAppSettings.h`\n\n\n```h\n\n#import \u003CFoundation/Foundation.h>\n\n\n@interface TRAppSettings : NSObject\n\n\n+ (TRAppSettings *) sharedInstance;\n\n\n- (NSString *) clientID;\n\n\n- (NSString *) clientSecret;\n\n\n- (NSString *) oauthScope;\n\n\n@end\n\n```\n\n\n`TRAppSettings.m`\n\n\n```objc\n\n@interface TRAppSettings ()\n\n\n@property (strong, nonatomic) NSUserDefaults *secrets;\n\n\n@end\n\n\nstatic TRAppSettings *sharedAppSettingsSingleton;\n\n\n@implementation TRAppSettings {\n    int firstRunPostUpdate;\n}\n\n\n+ (void)initialize\n\n{\n    static BOOL initialized = NO;\n    if(!initialized)\n    {\n        initialized = YES;\n        sharedAppSettingsSingleton = [[TRAppSettings alloc] init];\n    }\n\n    NSLog(@\"Pulling secrets from SECRETS_PLIST = %@.plist\", SECRETS_PLIST);\n}\n\n\n+ (TRAppSettings *) sharedInstance\n\n{\n    return sharedAppSettingsSingleton;\n}\n\n\n- (id)init {\n    NSString *troupeSecretsPath = [[NSBundle mainBundle] pathForResource:\"GitterSecrets-Dev\" 
ofType:@\"plist\"];\n    if(troupeSecretsPath == nil) {\n        NSString *failureReason = [NSString stringWithFormat:@\"Gitter secrets file not found in bundle: %@.plist. You probably need to add it to the `Gitter/Supporting Files` in Xcode navigator\", SECRETS_PLIST];\n        NSException* exception = [NSException\n            exceptionWithName:@\"FileNotFoundException\"\n            reason:failureReason\n            userInfo:nil];\n\n        NSLog(@\"%@\", failureReason);\n\n        [exception raise];\n    }\n    NSDictionary *troupeSecrets = [NSDictionary dictionaryWithContentsOfFile:troupeSecretsPath];\n\n    self.secrets = [NSUserDefaults standardUserDefaults];\n    [self.secrets registerDefaults:troupeSecrets];\n}\n\n\n- (NSString *) clientID {\n    return [self.secrets stringForKey:@\"OAuthClientId\"];\n}\n\n\n- (NSString *) clientSecret {\n    return [self.secrets stringForKey:@\"OAuthClientSecret\"];\n}\n\n\n- (NSString *)oauthScope {\n    return [self.secrets stringForKey:@\"OAuthCallback\"];\n}\n\n```\n\n\nUsage in the Swift app:\n\n\n```swift\n\nprivate let appSettings = TRAppSettings.sharedInstance()\n\n\nappSettings!.clientID()\n\nappSettings!.clientSecret()\n\nappSettings!.oauthScope()\n\n```\n\n\n### Adding in GitLab CI\n\n\nIf you're interested in setting up automated builds and publish releases to\nthe Apple App Store from GitLab CI, you can learn how [blog post about using\nfastlane](/blog/ios-publishing-with-gitlab-and-fastlane/).\n\n\n### Removing secrets from the repo history\n\n\nWe didn't have a complete picture of what to remove because `truffleHog`\ndidn't work well, so we didn't use BFG Repo-Cleaner. To remove secrets from\nthe git repo history, we just squashed all of the history into a single\ncommit.\n\n\n## Life after open sourcing apps\n\n\nWe have some [thoughts of deprecating the Android/iOS\napps](https://gitlab.com/gitlab-org/gitter/webapp/issues/2281) but the\ncommunity has been great to keep the apps alive so far. 
We released a couple\nversions of each app including [dark\ntheme](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/2)\nand [GitLab\nsign-in](https://gitlab.com/gitlab-org/gitter/gitter-android-app/merge_requests/112)\nfor Android and a bunch of technical debt and fixes for iOS, including\nremoving the deprecated\n[`SlackTextViewController`](https://gitlab.com/gitlab-org/gitter/gitter-ios-app/merge_requests/8)\n(and we are intensely working on incorporating the new\n[`SlackWysiwygInputController`](https://goo.gl/7NDM3x) 😜).\n\n\nThe\n[Android](https://gitlab.com/gitlab-org/gitter/gitter-android-app)/[iOS](https://gitlab.com/gitlab-org/gitter/gitter-ios-app)\napps could benefit from a lot of polish and fixes, so if you see anything\nparticularly annoying, we would love to review and merge your updates!\n\n\nCover image by [Nate Johnston](https://unsplash.com/@natejohnston) on\n[Unsplash](https://unsplash.com/photos/DkCydKeaLV8).\n\n{: .note}\n",[827,873,3660,9],{"slug":4314,"featured":6,"template":700},"open-sourcing-the-gitter-mobile-apps","content:en-us:blog:open-sourcing-the-gitter-mobile-apps.yml","Open Sourcing The Gitter Mobile Apps","en-us/blog/open-sourcing-the-gitter-mobile-apps.yml","en-us/blog/open-sourcing-the-gitter-mobile-apps",{"_path":4320,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4321,"content":4327,"config":4332,"_id":4334,"_type":14,"title":4335,"_source":16,"_file":4336,"_stem":4337,"_extension":19},"/en-us/blog/parent-child-pipelines",{"title":4322,"description":4323,"ogTitle":4322,"ogDescription":4323,"noIndex":6,"ogImage":4324,"ogUrl":4325,"ogSiteName":685,"ogType":686,"canonicalUrls":4325,"schema":4326},"How to get started with Parent-child pipelines","We introduced improvements to pipelines to help scale applications and their repo structures more effectively. 
Here's how they work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667040/Blog/Hero%20Images/parent_pipeline_graph.png","https://about.gitlab.com/blog/parent-child-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Parent-child pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-24\",\n      }",{"title":4322,"description":4323,"authors":4328,"heroImage":4324,"date":4329,"body":4330,"category":718,"tags":4331},[1059],"2020-04-24","As applications and their repository structures grow in complexity, a\nrepository `.gitlab-ci.yml` file becomes difficult to manage, collaborate\non, and see benefit from. This problem is especially true for the\nincreasingly popular \"[monorepo](https://en.wikipedia.org/wiki/Monorepo)\"\npattern, where teams keep code for multiple related services in one\nrepository. Currently, when using this pattern, developers all use the same\n`.gitlab-ci.yml` file to trigger different automated processes for different\napplication components, likely causing merge conflicts, and productivity\nslowdown, while teams wait for \"their part\" of a pipeline to run and\ncomplete.\n\n\nTo help large and complex projects manage their automated workflows, we've\nadded two new features to make pipelines even more powerful: Parent-child\npipelines, and the ability to generate pipeline configuration files\ndynamically.\n\n\n## Meet Parent-child pipelines\n\n\nSo, how do you solve the pain of many teams collaborating on many\ninter-related services in the same repository? 
\n\nLet me introduce you to [Parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html),\nreleased with with [GitLab\n12.7](/releases/2020/01/22/gitlab-12-7-released/#parent-child-pipelines).\nSplitting complex pipelines into multiple pipelines with a parent-child\nrelationship can improve performance by allowing child pipelines to run\nconcurrently. This relationship also enables you to compartmentalize\nconfiguration and visualization into different files and views. \n\n\n### Creating a child pipeline\n\n\nYou trigger a child pipeline configuration file from a parent by including\nit with the `include` key as a parameter to the `trigger` key. You can name\nthe child pipeline file whatever you want, but it still needs to be valid\nYAML.\n\n\nThe parent configuration below triggers two further child pipelines that\nbuild the Windows and Linux version of a C++ application. \n\n\n```cpp\n\n#include \u003Ciostream>\n\nint main()\n\n{\n  std::cout \u003C\u003C \"Hello GitLab!\" \u003C\u003C std::endl;\n  return 0;\n}\n\n```\n\n\nThe setup is a simple one but hopefully illustrates what is possible.\n\n\n```yaml\n\nstages:\n  - triggers\n\nbuild_windows:\n  stage: triggers\n  trigger:\n    include: .win-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n\nbuild_linux:\n  stage: triggers\n  trigger:\n    include: .linux-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n```\n\n\nThe important values are the `trigger` keys which define the child\nconfiguration file to run, and the parent pipeline continues to run after\ntriggering it. You can use all the normal sub-methods of `include` to use\nlocal, remote, or template config files, up to a maximum of three child\npipelines.\n\n\nAnother useful pattern to use for parent-child pipelines is a `rules` key to\ntrigger a child pipeline under certain conditions. 
In the example above, the\nchild pipeline only triggers when changes are made to files in the _cpp_app_\nfolder.\n\n\nThe Windows build child pipeline (`.win-gitlab-ci.yml`) has the following\nconfiguration, and unless you want to trigger a further child pipeline, it\nfollows standard a configuration format:\n\n\n```yaml\n\nimage: gcc\n\nbuild:\n  stage: build\n  before_script:\n    - apt update && apt-get install -y mingw-w64\n  script:\n    - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n  artifacts:\n    paths:\n      - helloGitLab.exe\n```\n\n\nDon't forget the `-y` argument as part of the `apt-get install` command, or\nyour jobs will be stuck waiting for user input.\n\n\nThe Linux build child pipeline (`.linux-gitlab-ci.yml`) has the following\nconfiguration, and unless you want to trigger a further child pipeline, it\nfollows standard a configuration format:\n\n\n```yaml\n\nimage: gcc\n\nbuild:\n  stage: build\n  script:\n    - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n  artifacts:\n    paths:\n      - helloGitLab\n```\n\n\nIn both cases, the child pipeline generates an artifact you can download\nunder the _Job artifacts_ section of the Job result screen.\n\n\nPush all the files you created to a new branch, and for the pipeline result,\nyou should see the two jobs and their subsequent child jobs.\n\n\n![Parent-child pipeline\nresult](https://about.gitlab.com/images/blogimages/non-dynamic-pipelines.png){:\n.shadow.medium.center}\n\nThe result of a parent-child pipeline\n\n{: .note.text-center}\n\n\n## Dynamically generating pipelines\n\n\nTaking Parent-child pipelines even further, you can also dynamically\ngenerate the child configuration files from the parent pipeline. 
Doing so\nkeeps repositories clean of scattered pipeline configuration files and\nallows you to generate configuration in your application, pass variables to\nthose files, and much more.\n\n\nLet's start with the parent pipeline configuration file:\n\n\n```yaml\n\nstages:\n  - setup\n  - triggers\n\ngenerate-config:\n  stage: setup\n  script:\n    - ./write-config.rb\n    - git status\n    - cat .linux-gitlab-ci.yml\n    - cat .win-gitlab-ci.yml\n  artifacts:\n    paths:\n      - .linux-gitlab-ci.yml\n      - .win-gitlab-ci.yml\n\ntrigger-linux-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .linux-gitlab-ci.yml\n        job: generate-config\n\ntrigger-win-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .win-gitlab-ci.yml\n        job: generate-config\n```\n\n\nDuring our self-defined `setup` stage the pipeline runs the\n`write-config.rb` script. For this article, it's a Ruby script that writes\nthe child pipeline config files, but you can use any scripting language. The\nchild pipeline config files are the same as those in the non-dynamic example\nabove. 
We use `artifacts` to save the generated child configuration files\nfor this CI run, making them available for use in the child pipelines\nstages.\n\n\nAs the Ruby script is generating YAML, make sure the indentation is correct,\nor the pipeline jobs will fail.\n\n\n```ruby\n\n#!/usr/bin/env ruby\n\n\nlinux_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        script:\n            - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n        artifacts:\n            paths:\n                - helloGitLab\nYML\n\n\nwin_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        before_script:\n            - apt update && apt-get install -y mingw-w64\n        script:\n            - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n        artifacts:\n            paths:\n                - helloGitLab.exe\nYML\n\n\nFile.open('.linux-gitlab-ci.yml', 'w'){ |f| f.write(linux_build)}\n\nFile.open('.win-gitlab-ci.yml', 'w'){ |f| f.write(win_build)}\n\n```\n\n\nThen in the `triggers` stage, the parent pipeline runs the generated child\npipelines much as in the non-dynamic version of this example but instead\nusing the saved `artifact` files, and the specified `job`.\n\n\nPush all the files you created to a new branch, and for the pipeline result,\nyou should see the three jobs (with one connecting to the two others) and\nthe subsequent two children.\n\n\n![Dynamic parent-child pipeline\nresult](https://about.gitlab.com/images/blogimages/dynamic-pipelines.png){:\n.shadow.medium.center}\n\nThe result of a dynamic parent-child pipeline\n\n{: .note.text-center}\n\n\n## Pipeline flexibility\n\n\nThis blog post showed some simple examples to give you an idea of what you\ncan now accomplish with pipelines. 
With one parent, multiple children, and\nthe ability to generate configuration dynamically, we hope you find all the\ntools you need to [build CI/CD workflows](/topics/ci-cd/) you need.\n\n\nYou can also watch a demo of Parent-child pipelines below:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/n8KpBSqZNbk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n",[9,695,1064,917],{"slug":4333,"featured":6,"template":700},"parent-child-pipelines","content:en-us:blog:parent-child-pipelines.yml","Parent Child Pipelines","en-us/blog/parent-child-pipelines.yml","en-us/blog/parent-child-pipelines",{"_path":4339,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4340,"content":4346,"config":4351,"_id":4353,"_type":14,"title":4354,"_source":16,"_file":4355,"_stem":4356,"_extension":19},"/en-us/blog/plugin-instability",{"title":4341,"description":4342,"ogTitle":4341,"ogDescription":4342,"noIndex":6,"ogImage":4343,"ogUrl":4344,"ogSiteName":685,"ogType":686,"canonicalUrls":4344,"schema":4345},"The problem with plugins","For all of the customization, plugins sometimes come at a high price.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673012/Blog/Hero%20Images/plugin-instability.jpg","https://about.gitlab.com/blog/plugin-instability","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The problem with plugins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-09-27\",\n      }",{"title":4341,"description":4342,"authors":4347,"heroImage":4343,"date":4348,"body":4349,"category":1040,"tags":4350},[715],"2019-09-27","\nWe’ve talked a lot over the past year about how [all-in-one is taking over the marketplace model](/blog/github-launch-continuous-integration/), and we highlighted [CloudBees 
adding SDM](/blog/jenkins-one-year-later/) in our most recent example. Even with all of the consolidation we’ve seen lately, plugins are still a popular [DevOps solution](/topics/devops/). On the surface, there’s a lot to appreciate: Literally thousands of plugins offer seemingly limitless customization without you having to make large investments in other tools. Need something? Chances are there’s a plugin for that.\n\nJenkins plugins have served as both a selling point **_and_** a downside – but how can a strength also be a weakness? All that customization comes with a few caveats.\n\n## Plugins and security vulnerabilities\n\nJenkins offers more than 1,600 community-contributed plugins. David Fiser over at the TrendLabs Security Intelligence Blog highlighted some [Jenkins security advisories associated with plain-text-stored credentials](https://blog.trendmicro.com/trendlabs-security-intelligence/hiding-in-plain-text-jenkins-plugin-vulnerabilities/) from July and August 2019. There were six plugins affected, one of which has been deprecated. At the time of article publication (August 30), three of the plugins had not been fixed.\n\nTo properly store credentials, a third-party credential provider, such as the `Credentials` plugin, is recommended. Organizations can also use a [`Secret`](https://javadoc.jenkins.io/index.html?hudson/util/Secret.html) to store credentials. Jenkins was proactive in identifying these potential problems but, in the case of plugins, Jenkins can only recommend best practices and notify users once they’re aware of a potential issue. 
Because the plugins are operated by third parties, there’s also no guarantee any problems will be fixed.\n\nInstalling Jenkins plugins is limited to either a dedicated Jenkins admin or someone with exclusive access to the Jenkins filesystem, but uploading a potentially malicious plugin to the Jenkins plugin site doesn’t require as much authentication.\n\nThe team at CyberArk wanted to see just how easy it would be for an attacker to infiltrate a plugin. Dubbed [Aladdin’s Lamp](https://www.cyberark.com/threat-research-blog/jenkins-plugins-aladdins-lamp-and-the-sultan-of-threats/), the CyberArk team modified the existing Green Balls plugin that changed the plugin image to an image of Aladdin’s lamp. What they inserted discreetly into the code was a capability that gave any unauthenticated remote attacker SYSTEM access to a Jenkins master that installed their plugin with a specially crafted request:\n\n[`http://jenkinsURL:8080/OpenSesame`](http://jenkinsURL:8080/OpenSesame)\n\nTheir experiment was not malicious, of course, but it highlighted just how easy it could be to exploit the plugin ecosystem.\n\n## Plugins and brittle pipelines\n\nIt’s a tall order for users to weigh the pros and cons of more than 1,600 plugins, and many people rely on a plugin’s popularity in order to gauge whether it’s a suitable option. A simple search for a Docker plugin could show almost 26 results, and upon further review, one of the top results has eight plugin dependencies. If a team is using plugins for Docker, Kubernetes, GitLab, Go – those dependencies can really add up, and that’s where teams start seeing brittle pipelines.\n\nTechnology is constantly evolving, and keeping up with all of these dependencies can spell trouble for pipelines. The last thing you want is a broken deployment pipeline because [the pipeline itself is broken vs. 
the actual software artifact or build that’s being tested](https://harness.io/2018/09/4-reasons-your-jenkins-pipelines-are-brittle/).\n\nA vast majority of Jenkins plugins were created by third-party developers, meaning they can vary in quality and [some plugins lose support without notice](https://thenewstack.io/many-problems-jenkins-continuous-delivery/). Abandoned plugins are out there because their creators have opted to work on something else. Teams have to be diligent with maintaining these plugins with every new Jenkins version, but as any Jenkins admin can tell you, [this process has not always gone over well](https://jenkins.io/blog/shifting-gears/).\n\n## Plugins and maintenance\n\nWe touched on this briefly but admins are mostly in agreement that Jenkins maintenance is, to put it simply, not a great time. There’s a reason why developers often talk about their love/hate relationship with Jenkins – **_yay!_**, there’s a plugin for everything I need, **_oh no!_** I’m a Jenkins plugin maintainer now.\n\nUpgrading one plugin means you’ll likely have to update many others, and many Jenkins admins do this directly on their production Jenkins master. In one example, [Blue Ocean requires dozens of dependencies, many of which you may have no use for](https://cb-technologists.github.io/posts/jenkins-plugins-good-bad-ugly/), such as the Bitbucket Pipeline for Blue Ocean and the GitHub Pipeline for Blue Ocean plugins, even if you don’t use either Bitbucket or GitHub for source control.\n\n## Plugins: Pros and cons\n\nThere are pros and cons to anything and plugins are no exception. There is a lot to love about plugins:\n\n*   Flexibility\n*   Customization\n*   Convenience\n\nAnd there are things to be wary of:\n\n*   Maintenance\n*   Dependencies\n*   Lack of support\n*   Security vulnerabilities\n\nWith Jenkins’s modular architecture there’s a building block for everything you need. 
However, an ecosystem built entirely on plugins is going to require some discipline, and that means dedicating resources into maintaining that plugin environment.\n\nPlugins can be a great asset for a DevOps team. As CloudBees pointed out, [even GitLab uses plugins](https://docs.gitlab.com/ee/administration/file_hooks.html). We just don’t think you should have to use plugins for really basic tasks. In the end, it’s important for organizations to weigh the pros and cons of different platforms for themselves. You can check out our ebook, “The benefits of single application CI/CD,” and see how we stack up against other CI tools.\n\nCover image by [Fernando Lavin](https://unsplash.com/@filmlav?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/@filmlav?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,721],{"slug":4352,"featured":6,"template":700},"plugin-instability","content:en-us:blog:plugin-instability.yml","Plugin Instability","en-us/blog/plugin-instability.yml","en-us/blog/plugin-instability",{"_path":4358,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4359,"content":4364,"config":4369,"_id":4371,"_type":14,"title":4372,"_source":16,"_file":4373,"_stem":4374,"_extension":19},"/en-us/blog/positive-outcomes-ci-cd",{"title":4360,"description":4361,"ogTitle":4360,"ogDescription":4361,"noIndex":6,"ogImage":1440,"ogUrl":4362,"ogSiteName":685,"ogType":686,"canonicalUrls":4362,"schema":4363},"4 Benefits of CI/CD","Learn how to implement and measure a successful CI/CD pipeline strategy and help your DevOps team deliver higher quality software, faster!","https://about.gitlab.com/blog/positive-outcomes-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Benefits of CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": 
\"2019-06-27\",\n      }",{"title":4360,"description":4361,"authors":4365,"heroImage":1440,"date":4366,"body":4367,"category":1040,"tags":4368},[715],"2019-06-27","\n[CI/CD](/topics/ci-cd/) helps DevOps teams ship higher quality software, faster, for improved software deployment. But is all [CI/CD](/topics/ci-cd/) created equal? What do the benefits of continuous integration, continuous delivery, and continuous deployment look like and how do you know you're on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. In [part two](/blog/business-impact-ci-cd/), we talked about the revenue impacts. Today, we’ll talk about what CI/CD can deliver and how to measure its success.\n\nIf these problems hit a little too close to home, stay tuned for part four where we dive deeper into finding the right CI/CD solution for you.\n\n## What are some of the benefits of a good CI/CD strategy?\n\n### 1. Increased speed of innovation and ability to compete in the marketplace\n\nTwo identical companies: One implements [CI/CD technology](/topics/ci-cd/) and the other doesn’t. Who do you think deploys applications faster? While this seems like a silly comparison, because _of course_ the company with more automation deploys faster, there are organizations out there still convinced they don’t need CI/CD because they’re not looking at their competition. Organizations that understand the importance of CI/CD are setting the pace of innovation for everyone else.\n\n### 2. Code in production is making money instead of sitting in a queue waiting to be deployed\n\nOrganizations that have implemented CI/CD are making revenue, satisfying customers, and getting user feedback on the product features they deploy, not waiting for a manual check to see if the code is up to par. 
They already know the code is good because they have tests that are automated, and continuous delivery means that code is deployed automatically if it meets certain standards. They’ve removed human error and delays from the process so they can ship more code to production.\n\n### 3. Great ability to attract and retain talent\n\nEngineers that can focus on what they’re best at will be happier and more productive, and that has far-reaching impact. Turnover can be expensive and disruptive. A good CI/CD strategy means engineers can work on important projects and not worry about time-consuming manual tasks. They can also work confidently knowing that errors are caught automatically, not right before deployment. This kind of cooperative engineering culture inevitably attracts talent.\n\n### 4. Higher quality code and operations due to specialization\n\nThe development team can focus on dev. The operations team can focus on ops. Bad code rarely makes it to production because continuous testing is automated. Developers can focus on the code rather than the production environment, and operations doesn’t have to feel like a gatekeeper or a barrier. Both teams can work to their strengths, and automated handoffs make for seamless processes for the entire team. [This kind of cooperation makes DevOps possible](/topics/devops/build-a-devops-team/) and improves code quality.\n\n## What capabilities are required to make this happen?\n\n### 1. Robust CI/CD\n\nWhen we use the term “robust,” it’s all about avoiding half-baked or partial solutions. There are several CI/CD solutions out there but there are varying degrees of effectiveness. Continuous integration and continuous delivery go hand in hand, so having a solution that offers both is ideal. The tool you use should offer the automation you need, not just some. If your CI/CD tool is prone to failure or “brittle,” it can be just one more thing to manage. 
This was precisely why [the team at Ticketmaster replaced Jenkins CI and moved to weekly releases](/blog/continuous-integration-ticketmaster/), decreasing their pipeline execution time from two hours to only _eight minutes_ to build, test, and publish artifacts.\n\n### 2. Containers and Kubernetes\n\nContainers have made a huge impact on the way companies build and deploy code. While it was once difficult to develop applications with a [microservices architecture](/blog/strategies-microservices-architecture/), over the past five years it has become considerably easier with container orchestration tools like Kubernetes, comprehensive CI/CD tools that automate testing and deployments, and APIs that update automatically. Breaking up services so they can run independently reduces dependencies and creates better workflows.\n\n### 3. Functionality for the entire DevOps lifecycle\n\nVisibility is a huge asset when improving DevOps workflows. For some teams, they can have several tools handling different facets of the software development lifecycle (SDLC), which creates integration issues, maintenance issues, visibility issues, and is [just plain expensive](/calculator/roi/) from a cost standpoint. A complex toolchain can also weaken security. In a [Forrester survey of IT professionals](/resources/downloads/201906-gitlab-forrester-toolchain.pdf), 45% said that they had difficulty ensuring security across the toolchain.\n\n## How would you measure the success of a CI/CD strategy?\n\n### 1. Cycle time\n\nCycle time is the speed at which a [DevOps team](/topics/devops/) can deliver a functional application, from the moment work begins to when it is providing value to an end user.\n\n### 2. Time to value\n\nOnce code is written, how long before it’s released? This delay from when code is written to running in production is the time to value, and is a bottleneck for many organizations. 
Continuous delivery as well as [examining trends in the QA process](/blog/trends-in-test-automation/) can help to overcome this barrier to quick deployments and frequent releases.\n\n### 3. Uptime, error rate, infrastructure costs\n\nUptime is one of the biggest priorities for the ops team, and with a good CI/CD strategy that automates different processes, they should be able to focus more on that goal. Likewise, error rates and infrastructure costs can be easily measured once CI/CD is put in place. Operations goals are a key indicator of process success.\n\n### 4. Team retention rate\n\nHappy developers stick around, so looking at retention rates is a reliable way to gauge how well new development processes and applications are working for the team. It might be tough for developers to speak up if they don’t like how things are going, but looking at retention rates can be one step in identifying potential problems.\n\nThe benefits of a good CI/CD strategy are felt throughout an organization: From HR to operations, teams work better and achieve goals. In such a competitive development landscape, having the right CI/CD in place gives any company an edge.\n\nSo what makes “good” CI/CD? 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n",[721,9,875],{"slug":4370,"featured":6,"template":700},"positive-outcomes-ci-cd","content:en-us:blog:positive-outcomes-ci-cd.yml","Positive Outcomes Ci Cd","en-us/blog/positive-outcomes-ci-cd.yml","en-us/blog/positive-outcomes-ci-cd",{"_path":4376,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4377,"content":4382,"config":4387,"_id":4389,"_type":14,"title":4390,"_source":16,"_file":4391,"_stem":4392,"_extension":19},"/en-us/blog/pre-filled-variables-feature",{"title":4378,"description":4379,"ogTitle":4378,"ogDescription":4379,"noIndex":6,"ogImage":2088,"ogUrl":4380,"ogSiteName":685,"ogType":686,"canonicalUrls":4380,"schema":4381},"How pre-filled CI/CD variables will make running pipelines easier","Learn more about this future release and how pre-filled variables will save time and reduce errors.","https://about.gitlab.com/blog/pre-filled-variables-feature","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How pre-filled CI/CD variables will make running pipelines easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-12-02\",\n      }",{"title":4378,"description":4379,"authors":4383,"heroImage":2088,"date":4384,"body":4385,"category":1040,"tags":4386},[715],"2020-12-02","\n[CI/CD variables](/topics/ci-cd/) are a useful way to customize pipelines based on their environment. But what if you need to override a variable, or what if you need to run a pipeline manually? These scenarios can create problems.\n\n*   What if you don’t know what variables/values to put in?\n*   What happens if you make a mistake?\n\nHaving to enter variables and values manually is tedious and prone to error. Also, a user may not know all the different variables/values they need to enter. 
In GitLab 13.7, we’re introducing a feature that helps to solve these problems by generating pre-filled variables from your `.gitlab-ci.yml` file when you run a pipeline.\n\n### What are CI/CD variables?\n\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) are dynamic values assigned to environments. These environment variables affect the way running processes behave on an operating system. Variables allow teams to customize jobs in GitLab CI/CD.\n\nThere are two places where teams can define variables:\n\n*   The `.gitlab-ci.yml` file\n*   The GitLab Runner `config.toml` file\n\nCI/CD variables can be very useful, but what if you need to override a variable or manually run a pipeline? You might do this if the results of a pipeline (for example, a code build) are required outside the normal operation of the pipeline. Teams may also opt for manual deployments to production and need to stop the pipeline early. Running a pipeline manually isn’t unusual, but [defining variables](https://docs.gitlab.com/ee/ci/variables/where_variables_can_be_used.html) and entering them in a manual pipeline hasn’t always been a totally smooth process.\n\nFirst, teams need to run a pipeline/job manually and then navigate into the overview. Then, they have to select all the required variables from a drop-down menu on the “Run Pipeline” page. If developers don’t know all the required variables by heart, they will need to check their references and switch back and forth. If there are numerous key/value pairs to enter, this can be especially tedious. \n\n### What are pre-filled variables?\n\nIn 13.7, we’re introducing a feature that will streamline this process. Now the “Run pipeline” form will [generate pre-filled variables](https://gitlab.com/gitlab-org/gitlab/-/issues/30101) for your pipeline based on the variable definitions in your `.gitlab-ci.yml` file. 
The response to this feature from the GitLab community was enthusiastic.\n\n![pre-filled variables issue](https://about.gitlab.com/images/blogimages/pre-filled-variables.png)\nPeople are excited about pre-filled variables!\n\n### The benefits of pre-filled variables\n\nHaving variables pre-filled is all about increasing efficiency and reducing the small frustrations that make jobs more difficult than they need to be. \n\nPre-filled variables will *reduce:*\n\n*   Frustration with scrolling dropdown values\n*   Friction with choosing the wrong values\n*   Re-running and debugging pipelines due to wrong values\n*   Errors and click actions\n\n![Run Pipeline](https://about.gitlab.com/images/blogimages/Run-pipeline.gif)\nPre-filled variables in action\n\nFor teams that manually deploy to production, pre-filled variables will make it easier during that review step so that everyone with permissions can manually trigger the deployment pipeline. If the reviewer needs to make an exception they can override a variable, if necessary. \n\nPre-filled variables will help teams save time, reduce errors, and make the manual pipeline process a bit smoother. Do you think we're missing something or have ways that we can streamline the process even further? Leave a comment in [the issue](https://gitlab.com/gitlab-org/gitlab/-/issues/30101) and let us know what you think. Everyone can contribute.\n\n### Other future GitLab CI releases\n\nPre-filled variables are only one CI feature in the works. We release [new features](/upcoming-releases/) on the 22nd of every month, and everyone can contribute to these [public](https://handbook.gitlab.com/handbook/values/#public-by-default) issues. \n\n## More on CI/CD\n\n- [Want a more effective CI/CD pipeline? 
Try our pro tips](/blog/effective-ci-cd-pipelines/)\n- [Webcast: 7 CI/CD hacks](/webcast/7cicd-hacks/)\n- [How to use GitLab’s CI/CD pipeline templates](/blog/get-started-ci-pipeline-templates/)\n\nCover image by [Gerrie van der Walt](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[9,695],{"slug":4388,"featured":6,"template":700},"pre-filled-variables-feature","content:en-us:blog:pre-filled-variables-feature.yml","Pre Filled Variables Feature","en-us/blog/pre-filled-variables-feature.yml","en-us/blog/pre-filled-variables-feature",{"_path":4394,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4395,"content":4401,"config":4407,"_id":4409,"_type":14,"title":4410,"_source":16,"_file":4411,"_stem":4412,"_extension":19},"/en-us/blog/prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd",{"title":4396,"description":4397,"ogTitle":4396,"ogDescription":4397,"noIndex":6,"ogImage":4398,"ogUrl":4399,"ogSiteName":685,"ogType":686,"canonicalUrls":4399,"schema":4400},"Prepare now: Docker Hub rate limits will impact GitLab CI/CD","Learn how Docker Hub's upcoming pull rate limits will affect GitLab pipelines and what you can do to avoid disruptions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662488/Blog/Hero%20Images/blog-image-template-1800x945__3_.png","https://about.gitlab.com/blog/prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Prepare now: Docker Hub rate limits will impact GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-03-24\",\n      
}",{"title":4396,"description":4397,"authors":4402,"heroImage":4398,"date":4403,"body":4404,"category":4405,"tags":4406},[1122],"2025-03-24","On April 1, 2025, Docker will implement new [pull rate\nlimits](https://docs.docker.com/docker-hub/usage/) to Docker Hub that may\nsignificantly impact CI/CD pipelines across the industry, including those\nrunning on GitLab. The most significant change is the 100 pulls-per-6-hours\nlimit for unauthenticated users.\n\n\n## What's changing?\n\n\nStarting April 1, Docker will enforce the following pull rate limits:\n\n\n| User type | Pull rate limit per hour | Number of public repositories | Number of private repositories |\n|-----------|----------------|--------------|------------------|\n| Business, Team, Pro (authenticated) | Unlimited (fair use) | Unlimited | Unlimited |\n| Personal (authenticated) | 200 per 6-hour window | Unlimited | Up to 1 |\n| Unauthenticated users | 100 per 6-hour window per IPv4 address or IPv6 /64 subnet | Not applicable | Not applicable |\n\n\n\u003Cp>\u003C/p>\n\nThis is particularly important because:\n\n\n* GitLab's Dependency Proxy currently pulls from Docker Hub as an\nunauthenticated user.\n\n* Most CI/CD pipelines that don't use the Dependency Proxy pull directly\nfrom Docker Hub as unauthenticated users.\n\n* On hosted runners for GitLab.com, multiple users might share the same IP\naddress or subnet, making them collectively subject to this limit.\n\n\n## How this impacts GitLab users\n\n\n**Impact on direct Docker Hub pulls**\n\n\nIf your CI/CD pipelines directly pull images from Docker Hub without\nauthentication, they will be limited to 100 pulls per six-hour window per IP\naddress. 
For pipelines that run frequently or across multiple projects\nsharing the same runner infrastructure, this will quickly exhaust the limit\nand cause pipeline failures.\n\n\n**Impact on GitLab Dependency Proxy**\n\n\nThe GitLab Dependency Proxy feature allows you to cache Docker images within\nGitLab to speed up pipelines and reduce external dependencies. However, the\ncurrent implementation pulls from Docker Hub as an unauthenticated user,\nmeaning it will also be subject to the 100 pulls-per-6-hours limit.\n\n\n**Impact on hosted runners**\n\n\nFor hosted runners on GitLab.com, we use [Google Cloud's pull-through\ncache](https://cloud.google.com/artifact-registry/docs/pull-cached-dockerhub-images).\nThis mirrors the commonly pulled images and allows us to avoid rate limits.\nJob images defined as `image:` or `services:` in your `.gitlab-ci.yml` file,\nare not affected by rate limits.\n\n\nThings are slightly more challenging whenever images are pulled within the\nrunner environment. The most common use case to pull images during runner\nruntime is to build an image using Docker-in-Docker or Kaniko. In this\nscenario, the Docker Hub image defined in your `Dockerfile` is pulled\ndirectly from Docker Hub and is likely to be affected by rate limits.\n\n\n## How GitLab is responding\n\n\nWe're actively working on solutions to mitigate these challenges:\n\n\n* **Dependency Proxy authentication:** We've added support for Docker Hub\nauthentication in the [GitLab Dependency Proxy\nfeature](https://gitlab.com/gitlab-org/gitlab/-/issues/331741). 
This will\nallow the Dependency Proxy to pull images from Docker Hub as an\nauthenticated user, significantly increasing the rate limits.\n\n* **Documentation updates:** We've updated our\n[documentation](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials)\nto provide clear guidance on configuring pipeline authentication for Docker\nHub.\n\n* **Internal infrastructure preparation:** We're preparing our internal\ninfrastructure to minimize the impact on hosted runners for GitLab.com.\n\n\n## How you can prepare\n\n\n**Option 1: Configure Docker Hub authentication in your pipelines**\n\n\nFor pipelines that pull directly from Docker Hub, you can configure\nauthentication to increase your rate limit to 200 pulls per six-hour window\n(or unlimited with a paid Docker Hub subscription).\n\n\nAdd Docker Hub credentials to your project or group CI/CD variables (not in\nyour `.gitlab-ci.yml` file). Please refer to our [documentation on using\nDocker\nimages](https://docs.gitlab.com/ci/docker/using_docker_images/#use-statically-defined-credentials)\nfor detailed instructions on setting up the `DOCKER_AUTH_CONFIG` CI/CD\nvariable correctly.\n\n\n**Option 2: Use the GitLab Container Registry**\n\n\nConsider pushing your frequently used Docker images to your [GitLab\nContainer\nRegistry](https://docs.gitlab.com/user/packages/container_registry/). This\neliminates the need to pull from Docker Hub during CI/CD runs:\n\n\n1. Pull the image from Docker Hub.\n\n2. Tag it for your GitLab Container Registry.\n\n3. Push it to your GitLab Container Registry.\n\n4. 
Update your pipelines to pull from GitLab Container Registry.\n\n\n```\n\ndocker pull busybox:latest\n\ndocker tag busybox:latest $CI_REGISTRY_IMAGE/busybox:latest\n\ndocker push $CI_REGISTRY_IMAGE/busybox:latest\n\n```\n\n\nThen in your `.gitlab-ci.yml`:\n\n\n`image: $CI_REGISTRY_IMAGE/busybox:latest`\n\n\n**Option 3: Use GitLab Dependency Proxy**\n\n\nGitLab's Dependency Proxy feature provides a way to cache and proxy Docker\nimages, reducing external dependencies and rate limit issues.\n\n\nCurrent authentication options:\n\n* GitLab 17.10: Configure Docker Hub authentication for the Dependency Proxy\nusing [GraphQL\nAPI](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials-using-the-graphql-api)\n\n* GitLab 17.11: Use the new UI-based configuration in your group's settings\n(already available on GitLab.com)\n\n\nOnce authentication is properly configured, you can:\n\n\n1. Configure Docker Hub credentials in your group's Dependency Proxy\nsettings:\n  - For GitLab 17.11+ (or current GitLab.com): Navigate to your group's settings > Packages & Registries > Dependency Proxy.\n  - For GitLab 17.10: Use the GraphQL API to configure authentication.\n2. 
Update your pipelines to use the Dependency Proxy URLs in your CI/CD\nconfiguration:\n\n`image: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/busybox:latest`\n\n\n**Option 4: Consider a Docker Hub paid subscription**\n\n\nFor organizations with heavy Docker Hub usage, upgrading to a paid Docker\nsubscription (Team or Business) will provide unlimited pulls, which may be\nthe most straightforward solution.\n\n\n## Best practices to reduce Docker Hub rate limit impact\n\n\nRegardless of which option you choose, consider these best practices to\nminimize Docker Hub rate limit impact:\n\n\n* Use specific image tags instead of `latest` to avoid unnecessary pulls.\n\n* Consolidate your Docker files to use the same base images across projects.\n\n* Schedule less critical pipelines to run outside of peak hours.\n\n* Use caching effectively to avoid pulling the same images repeatedly.\n\n\n**Note:** According to Docker Hub\n[documentation](https://docs.docker.com/docker-hub/usage/pulls/#pull-definition),\nthe pull count is incremented when pulling the image manifest, not based on\nimage size or number of layers.\n\n\n## Timeline and next steps\n\n\n**Now**\n  * Implement authentication for direct Docker Hub pulls.\n  * GitLab.com users can already configure Docker Hub authentication for the Dependency Proxy using either:\n    * The GraphQL API, or\n    * The UI in group settings\n  * Self-managed GitLab 17.10 users can configure Dependency Proxy authentication using the GraphQL API.\n\n**April 1, 2025**\n  * Docker Hub rate limits go into effect.\n\n**April 17, 2025**\n  * GitLab 17.11 will be released with UI-based Dependency Proxy authentication support for self-managed instances. \n\nWe recommend taking action well before the April 1 deadline to avoid\nunexpected pipeline failures. For most users, configuring the Dependency\nProxy with Docker Hub authentication is the most efficient long-term\nsolution.\n\n\n> Have questions or need implementation help? 
Please visit [this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/526605) where our team\nis actively providing support.\n","bulletin-board",[9,1062,495],{"slug":4408,"featured":91,"template":700},"prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd","content:en-us:blog:prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd.yml","Prepare Now Docker Hub Rate Limits Will Impact Gitlab Ci Cd","en-us/blog/prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd.yml","en-us/blog/prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd",{"_path":4414,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4415,"content":4421,"config":4426,"_id":4428,"_type":14,"title":4429,"_source":16,"_file":4430,"_stem":4431,"_extension":19},"/en-us/blog/progressive-delivery-using-review-apps",{"title":4416,"description":4417,"ogTitle":4416,"ogDescription":4417,"noIndex":6,"ogImage":4418,"ogUrl":4419,"ogSiteName":685,"ogType":686,"canonicalUrls":4419,"schema":4420},"Progressive Delivery: How to get started with Review Apps","Progressive Delivery is the next evolution of continuous delivery, and Review Apps are a key enabler.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666841/Blog/Hero%20Images/progressive-delivery-review-apps.jpg","https://about.gitlab.com/blog/progressive-delivery-using-review-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Progressive Delivery: How to get started with Review Apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2019-04-19\",\n      }",{"title":4416,"description":4417,"authors":4422,"heroImage":4418,"date":4423,"body":4424,"category":718,"tags":4425},[803],"2019-04-19","\nIf you're not familiar with [Progressive Delivery](https://redmonk.com/jgovernor/2018/08/06/towards-progressive-delivery/),\nit's a new set of best practices that is gaining hold for 
delivering safely and frequently to\nproduction. Although it's not a completely new idea in the same way that continuous\ndelivery originally was, it is a clear evolution of those ideas that brings something\nnew to the table. By taking a step back and considering the corpus of knowledge and experience\ngained over the last 10 years, then applying a bit of systems thinking to\nhow all these different practices interact with emerging technologies, it has set a new\nbaseline for how software delivery can be done effectively.\n\nWe discuss our overall vision for Progressive Delivery on our [CI/CD vision page](/direction/ops/#progressive-delivery),\nwhich also links to a few more resources if you're not up to speed with the concept in general.\n\nIn summary, though, continuous delivery gets you out of the mode of shipping one, big, risky\ndeployment to production, and instead breaks that risk up into many tiny parts – each with a\nfraction of the risk. Progressive Delivery takes this a step further by enabling you to\n[canary test code](https://docs.gitlab.com/ee/user/project/canary_deployments.html) in\nproduction with a small portion of your user base, use [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html)\nto manage rollout pacing, tie everything together with [tracing](https://docs.gitlab.com/ee/operations/tracing.html),\nand automate the further deployment or rollback of that code depending on how it performs.\n\n## How Review Apps can help enable Progressive Delivery\n\nLet me begin by explaining what GitLab Review Apps are:\n\n[GitLab Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) are\nstaging environments that are automatically created for every branch and/or merge request. They are a collaboration tool\nbuilt into GitLab that helps take the hard work out of providing an environment to\nshowcase or validate product changes. 
There are a lot of different ways to configure\nthem, but the recommended way is to automatically create review app instances during your\n[merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html). Doing this\nwill ensure that any merge request that is being considered will have an application\nthat developers can connect to to validate their changes.\n\nWith GitLab, we go a step beyond simply creating the review environment: we make it accessible.\n\nOnce configured, on your merge request page you'll now see a \"view app\" button that, as long as your\n[route maps](https://docs.gitlab.com/ee/ci/review_apps/#route-maps) are configured correctly, will allow your\nusers to jump right to the changed content. Review apps do work even without the route maps – in that case\nthey will take you to the home page of your app – but with them they almost feel like magic.\n\n![Review app](https://docs.gitlab.com/ee/ci/review_apps/img/review_apps_preview_in_mr.png \"Review app\"){: .shadow.medium.center}\n\nReview apps are a powerful tool on their own for enabling quick iteration, but if we think about\nthem in the context of Progressive Delivery, a whole new set of possibilities opens up.\n\n## Review apps for progressive validation\n\nAs mentioned above, a typical Progressive Delivery flow involves using targeted feature flags to validate\nchanges as they flow to production environments. Review apps, if configured to point to production\ndata/endpoints instead of ephemeral data, can serve as a merge request-based window into the changes\nthat are being considered for release.\n\nSome of this will of course depend on your code, your testing procedures, and environments. 
You may\npoint review apps at production endpoints from the moment they are spun up, or perhaps only later\nin your merge request pipeline after some initial validation.\n\nSince anyone can use these environments, you can point anyone with a stake in the success of the\nnew feature to the review app, and they are able to see the live behavior, using their own real\ndata, immediately in their own web browser. This is incredibly powerful for enabling rapid feedback\nand iteration. As a preview, we're also looking to improve this capability by adding an\n[easy-to-use review interface for collecting feedback](https://gitlab.com/gitlab-org/gitlab-ee/issues/10761)\nright into review apps directly.\n\n## Feature flags and tracing\n\nWe can take this idea even one more step further. Using [per-environment feature flag behaviors](https://docs.gitlab.com/ee/operations/feature_flags.html#define-environment-specs), we\ncan control the behavior of the review app environment in any way that the production environment can\nbe controlled. This opens up the possibility of validating any combination.\n\nFinally, since review apps are built and deployed from GitLab CI/CD, all the predefined CI/CD environment\nvariables are available to the deploy script. You could configure your application to use your\nmerge request ID (`CI_MERGE_REQUEST_ID`) as its unique ID for transaction tracing, tying transactions\nin the system automatically to the appropriate GitLab merge request.\n\n## As you can see, there's a ton of potential for Progressive Delivery here\n\nReview apps don't replace\nthe role of feature flags in a Progressive Delivery pipeline, but they provide an incredible\nsupplement that enables segmented validation in a completely new way. 
All in all, it's such an exciting time for\ncontinuous delivery – there's so much innovation happening on the process and technology fronts, and I'm\ncertain we're only scratching the surface of where we're headed.\n\nReview Apps is just one way [GitLab CI/CD](/solutions/continuous-integration/) enables Progressive Delivery. Join us for our webcast _Mastering continuous software development_ and learn how GitLab’s built-in CI/CD helps teams implement Progressive Delivery workflows, without the complicated integrations and plugin maintenance.\n\n[Watch the GitLab CI/CD webcast](/webcast/mastering-ci-cd/)\n{: .alert .alert-gitlab-purple .text-center}\n\nIf you have more ideas on how to use review apps even more effectively, or where you see the technology\nevolving next, please share in the comments.\n\nPhoto by [Helloquence](https://unsplash.com/photos/5fNmWej4tAA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,695,785],{"slug":4427,"featured":6,"template":700},"progressive-delivery-using-review-apps","content:en-us:blog:progressive-delivery-using-review-apps.yml","Progressive Delivery Using Review Apps","en-us/blog/progressive-delivery-using-review-apps.yml","en-us/blog/progressive-delivery-using-review-apps",{"_path":4433,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4434,"content":4440,"config":4446,"_id":4448,"_type":14,"title":4449,"_source":16,"_file":4450,"_stem":4451,"_extension":19},"/en-us/blog/protecting-manual-jobs",{"title":4435,"description":4436,"ogTitle":4435,"ogDescription":4436,"noIndex":6,"ogImage":4437,"ogUrl":4438,"ogSiteName":685,"ogType":686,"canonicalUrls":4438,"schema":4439},"How to limit access to manual pipeline gates and deployments using GitLab","Let's look at how to use protected environments to set up access controls for production deployments and manual 
gates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681105/Blog/Hero%20Images/protect_manual_jobs.jpg","https://about.gitlab.com/blog/protecting-manual-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to limit access to manual pipeline gates and deployments using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thao Yeager\"}],\n        \"datePublished\": \"2020-02-20\",\n      }",{"title":4435,"description":4436,"authors":4441,"heroImage":4437,"date":4443,"body":4444,"category":718,"tags":4445},[4442],"Thao Yeager","2020-02-20","This blog post was originally published on the GitLab Unfiltered\nblog. It was reviewed and republished on\n2020-02-21.\n\n{: .alert .alert-info .note}\n\n\nIn our world of automation, why would anyone want to do something manually?\nManual has become almost synonymous with inefficient. But, when it comes to\nCI/CD pipelines, a properly configured **manual** job can be a powerful way\nto control deployments and satisfy compliance requirements. Let’s take a\nlook at how manual jobs can be defined to serve two important use cases:\nControlling who can deploy, and setting up manual gates.\n\n\n## Limit access to deploy to an environment\n\n\nDeploying to production is a mission-critical occurence that should be\nprotected. Projects with a Kubernetes cluster could benefit from moving to a\ncontinuous deployment (CD) model in which a [branch or merge request, once\nmerged, is auto-deployed to\nproduction](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-deploy),\nand the absence of human intervention avoids mishaps. But for projects not\nyet configured for CD, let's consider this use case: Imagine a pipeline with\na manual job to deploy to prod, which can be triggered by any user with\naccess to push code. 
The risk of a unplanned, unintended production\ndeployment is very real.\n\n\nFortunately, it’s possible to use [protected\nenvironments](https://docs.gitlab.com/ee/ci/environments/protected_environments/)\nto prevent just anyone from deploying to production. When [configuring a\nprotected\nenvironment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environments),\nyou can define the roles, groups, or users to whom deploy access is granted.\nThe protected environment can then be defined in a manual job to deploy\nwhich limits who can run it. The configuration could look something like\nthis:\n\n\n```yaml\n\ndeploy_prod:\n  stage: deploy\n  script:\n    - echo \"Deploy to production server\"\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  only:\n    - master\n```\n\n\nIn the example above, the keyword `environment` is used to reference a\nprotected environment (as [configured in project\nsettings](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environment))\nwith a list of users who can run the job, in this case deploy to the named\nenvironment. Users without access see a disabled **play** button and are\nunable to execute the job.\n\n\n## Add an approval step\n\n\nCompliance rules may specify that approval is required for certain\nactivities in a workflow, even if they aren't technically a deployment step\nthemselves. In this use case, an approval step can also be added in the\npipeline that prompts an authorized user to take action to continue. 
This\ncan be achieved by structuring your pipeline with an \"approve\" stage\ncontaining a special manual job – for example, the YAML to insert an\napproval stage before deployment could look like this:\n\n\n```yaml\n\nstages:\n  - build\n  - approve\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - echo Hello!\n\napprove:\n  stage: approve\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  allow_failure: false\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  only:\n    - master\n```\n\n\nIn the YAML above, `allow_failure: false` [defines the manual job as\n\"blocking\"](https://docs.gitlab.com/ee/ci/yaml/#whenmanual), which will\ncause the pipeline to pause until an authorized user gives \"approval\" by\nclicking on the **play** button to resume. Only the users part of that\nenvironment list will be able to perform this action. In this scenario, the\nUI view of the pipeline in the example CI configuration above would look\nlike this:\n\n\n![Pipeline view of approval stage manual\njob](https://about.gitlab.com/images/blogimages/manual_job_approve_stage_ui.png){:\n.shadow}\n\n\n## Summary\n\n\nAs illustrated in the YAML examples and image above, manual jobs defined\nwith protected environments and blocking attributes are effective tools for\nhandling compliance needs as well as for ensuring there are proper controls\nover production deployments.\n\n\nTell us how using protected environments with manual jobs has secured your\ndeployments or whether blocking manual jobs helps you meet compliance and\nauditing. 
[Create an issue in the GitLab project issue\ntracker](https://gitlab.com/gitlab-org/gitlab/issues/new) to share your\nfeedback with us.\n\n\nCover image by [Diane Walton](https://unsplash.com/photos/BNnzmBmnPg4) on\n[Unsplash](https://unsplash.com)\n\n{: .note}\n",[9,896,875,695,721],{"slug":4447,"featured":6,"template":700},"protecting-manual-jobs","content:en-us:blog:protecting-manual-jobs.yml","Protecting Manual Jobs","en-us/blog/protecting-manual-jobs.yml","en-us/blog/protecting-manual-jobs",{"_path":4453,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4454,"content":4460,"config":4465,"_id":4467,"_type":14,"title":4468,"_source":16,"_file":4469,"_stem":4470,"_extension":19},"/en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"title":4455,"description":4456,"ogTitle":4455,"ogDescription":4456,"noIndex":6,"ogImage":4457,"ogUrl":4458,"ogSiteName":685,"ogType":686,"canonicalUrls":4458,"schema":4459},"Provision group runners with Google Cloud Platform and GitLab CI","This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098300/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_623844718_4E5Fx1Q0DHikigzCsQWhOG_1750098300048.jpg","https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Provision group runners with Google Cloud Platform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-19\",\n      }",{"title":4455,"description":4456,"authors":4461,"heroImage":4457,"date":4462,"body":4463,"category":718,"tags":4464},[1956,1957],"2024-11-19","Are you interested in hosting your own servers to run your GitLab 
CI/CD\npipelines but don’t know where to begin? Setting up a GitLab Runner to run\nyour pipelines on your own infrastructure can seem like a daunting task as\nit requires infrastructure knowledge and the know-how to maintain that\ninfrastructure. Typically this process requires the provision of\ninfrastructure, the installing of dependency, and testing that it works with\nyour GitLab instance.\n\n\nThis article highlights how easy it is to easily spin up a GitLab Runner of\nyour own utilizing GitLab’s Google Cloud Integration. Follow this tutorial\nand it will teach you how to set up a new group runner on GitLab.com using\nGoogle Cloud Platform in less than 10 minutes!\n\n\nYou will learn how to:\n\n\n- Create a new group runner.\n\n- Configure the new group runner’s tags and description.\n\n- Register the new group runner by adding in configurations.\n\n- Provision the GitLab Runner utilizing `gcloud cli` and Terraform.\n\n- Have your GitLab Runner pick up its first GitLab CI job.\n\n\n## Prerequisites\n\n- A terminal with Bash installed\n\n- Owner access on a Google Cloud Platform project\n\n- Terraform (or OpenTofu) [Version\n1.5](https://releases.hashicorp.com/terraform/1.5.7/) or greater \n\n- [gcloud CLI](https://cloud.google.com/sdk/docs/install) \n\n- 10 minutes\n\n\n## Tutorial\n\n1. Create a new group runner under __Build > Runners > New Group Runner__.\n\n\n__Note:__ Navigate to the group level.\n\n\n![GitLab Runner setup\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098317126.png)\n\n\n2. Configure the new group runner's tags, description, and any additional\nconfigurations.\n\n\n![New Group Runner\nsetup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098317127.png)\n\n\n3. 
Select __Google Cloud__.\n\n\n![Select Google Cloud\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098317129.png)\n\n\n4. Copy your project ID from Google Cloud Platform.\n\n\n![Copy project ID from GCP\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098317131.png)\n\n\n5. Fill out your Google Cloud project ID and choose a region, zone, and type\nof machine you want to use.\n\n\n![Screen to fill out Google Cloud\ninformation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098317132.png)\n\n\n6\\. Once this information is filled out, click **Setup instructions**.\n\n\nRun the bash script provided in Step 1 above.\n\n\n**Note:** This script was saved to a file called `setup.sh` for ease of use.\nYou may copy this right into your terminal if you are confident in\ndebugging.\n\n\n![Setup instructions\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098317134.png)\n\n\n![Script for GitLab\nRunner](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098317135.png)\n\n\n7\\. Create a `main.tf` file and follow the instructions in GitLab.\n\n\n**Note:** If you want to use OpenTofu instead of Terraform, you can still\ncopy the code and only have to adjust the Terraform commands for applying\nthe configuration. 
\n\n\n![Install and register GitLab Runner\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098317136.png)\n\n\nOnce successfully provisioned, you should be see the following:\n\n\n![GitLab Runner\ncode](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098317137.png)\n\n\n8\\. If you close the instructions and click the **View runners** button, you\nwill now have a newly provisioned runner present with \"Never contacted\" as\nits status.\n\n\n![Newly provisioned runner on\nscreen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098317139.png)\n\n\n9\\. In any project, add the following `.gitlab-ci.yml`.\n\n\n```  \n\nstages:  \n  - greet\n\nhello_job:  \n  stage: greet  \n  tags:  \n    - gcp-runner  \n  script:  \n    - echo \"hello\"  \n```\n\n\nVolia! You have set up your first GitLab Runner utilizing Google Cloud\nPlatform.\n\n\n# Next steps\n\n\nNow that you have provisioned your very own GitLab Runner, consider\noptimizing it for your specific use case. Some things to consider with your\nrunner moving forward:\n\n\n- Is the runner I provisioned the right size? Does it need additional\nresources for my use case? \n\n- Does the GitLab Runner contain all the dependency my builds need?  
\n\n- How can I store the GitLab Runner as infrastructure as code?\n\n\n> Make sure to bookmark the [Provisioning runners in Google Cloud\ndocumentation](https://docs.gitlab.com/ee/ci/runners/provision_runners_google_cloud.html)\nfor easy reference.\n",[917,495,785,9,1127,939,232],{"slug":4466,"featured":6,"template":700},"provision-group-runners-with-google-cloud-platform-and-gitlab-ci","content:en-us:blog:provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","Provision Group Runners With Google Cloud Platform And Gitlab Ci","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"_path":4472,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4473,"content":4479,"config":4483,"_id":4485,"_type":14,"title":4486,"_source":16,"_file":4487,"_stem":4488,"_extension":19},"/en-us/blog/publishing-a11y-reports-in-gitlab-pages",{"title":4474,"description":4475,"ogTitle":4474,"ogDescription":4475,"noIndex":6,"ogImage":4476,"ogUrl":4477,"ogSiteName":685,"ogType":686,"canonicalUrls":4477,"schema":4478},"Publishing Accessibility Reports in GitLab Pages","How to setup the Automated Accessibility Scanning feature in GitLab and publish the report to GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681293/Blog/Hero%20Images/a11y-report-html.jpg","https://about.gitlab.com/blog/publishing-a11y-reports-in-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Publishing Accessibility Reports in GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Heimbuck\"}],\n        \"datePublished\": \"2020-05-11\",\n      }",{"title":4474,"description":4475,"authors":4480,"heroImage":4476,"date":3716,"body":4481,"category":978,"tags":4482},[3696],"{::options parse_block_html=\"true\" /}\n\n\n\n\nAt GitLab, we believe [everyone 
can\ncontribute](/company/strategy/#contribute-with-gitlab) and we build software\nthat reinforces this concept and helps others live up to that value. We also\nbelieve that bringing test data to developers as quickly as possible\nfollowing a commit is one of the best ways to shorten cycle times and\ndeliver features to customers more efficiently. The Automated Accessibility\ntesting in GitLab is one area of that testing.\n\n\nBut how can we share the results of these accessibility scans with others in\nour organization outside of the context of the Merge Request? Taking\ninspiration from another [blog\npost](https://about.gitlab.com/blog/publish-code-coverage-report-with-gitlab-pages/)\nand making use of [GitLab\nPages](https://docs.gitlab.com/ee/user/project/pages/) I set out to do just\nthat.\n\n\n## What is accessibility testing? \n\n\nI talked about accessibility testing, why it's important, and our vision for\nthis category in a [previous\npost](https://about.gitlab.com/blog/introducing-accessibility-testing-in-gitlab/).\nIt's worth your time to take a few minutes and read that first if you\nhaven't already.\n\n\nIf you read the blog, welcome back! Now, let's get to HOW you can use this\nnew feature. After some initial testing, I wanted to record a video showing\nhow to use this new feature. I ran into some problems though, some of my own\nmaking and some unexpected. I thought a blog would be great follow-up to\nthat [video](https://www.youtube.com/watch?v=LsW5D5HhuyE) and help explain\nsome of what I ran into. Let's get to it!\n\n\n## Setting up accessibility testing in GitLab\n\n\nIntroduced as part of the [Minimum Viable\nChange](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)\nto make testing accessibility easier, we created a template that can be\nincluded into your project's .gitlab-ci.yml file and adds the accessibility\ntesting job to your pipeline. 
You can do this at any point throughout your\npipeline but, ultimately, we want to decrease that cycle time between when a\nchange is made and when an accessibility issue is found for the developer.\nTo accomplish this, we will run the job AFTER our change is deployed to a\nreview application.\n\n\nI created a walkthrough for the GitLab Unfiltered YouTube channel to walk\nthrough the process of setting this up. After some trial and error I got\nthis working. The relevant portion of the resulting .gitlab-ci.yml entry is\nbelow.\n\n\n```yml\n\n\nstages:\n - build\n - test\n - deploy review\n - deploy staging\n - accessibility\n - deploy production\n - production tests\n - cache\n\ncache:\n  key: ${CI_COMMIT_REF_SLUG}\n  policy: pull\n  paths:\n    - node_modules/\n\ninclude:\n  - template: \"Verify/Accessibility.gitlab-ci.yml\"\n\nvariables:\n  STAGING_DOMAIN: nimblealizer-staging.surge.sh\n  PRODUCTION_DOMAIN: nimblealizer.surge.sh\n  a11y_urls: \"http://nimblealizer-staging.surge.sh\"\n\n```\n\n\nTo summarize what changed to add the accessibility job:\n\n\n1. Add the stage for accessibility. It is important to note that this\nhappens AFTER the deploy to staging, which is the site I want to scan.\n\n2. Include the template that runs the test.\n\n3. Add the ally_urls variable so the template knows what to scan. In this\ncase I added the staging site URL to scan.\n\n\n## What happens now?\n\n\nAfter committing this change, a pipeline will kickoff that builds the\nwebsite, runs some tests, deploys to staging, and then runs the\naccessibility scan.\n\n\nThe result of this scan is shown on the Merge Request page just by including\nthe template because it is using the `artifacs:expose_as` keyword. This is\ngreat news for the developer since the report is now easy to view. 
The Pa11y\nengine also produces a  an easy to read report that explains where issues\nare in the code and provide links to information about how how to resolve\nthem.\n\n\n![Accessibility report as a build\nartifact](https://about.gitlab.com/images/blogimages/publish_a11y_reports/a11y-merge-request-build-artifact.png){:\n.shadow}\n\nThe resulting build artifact on the Merge Request Pages\n\n{: .note.text-center}\n\n\nBut what if we wanted to share this report across the organization, or even\nbetter link to it from other places like group dashboards? Then we have an\nissue. The job value will always be changing and we don't want to force\nother things to update to reflect our change. What if instead we could\npublish this report to the same place every time, so that the latest version\nwas always at the same URL?\n\n\n## GitLab pages to the rescue!\n\n\nIn my 6 months as the Product Manager for the Testing categories, I had\nprobably already sent the link to this [excellent\nblog](https://about.gitlab.com/blog/publish-code-coverage-report-with-gitlab-pages/)\nfrom Grzegorz a dozen or more times to customers, prospects or coworkers\nexplaining how to publish a coverage report through Pages. I had a strong\nsuspicion that we could do the same thing with the HTML report that came out\nof the accessibility scan. I followed along with the blog post and after\nsome trial and error, I was able to get the deploy job running and the\naccessibility report published! All I had to do was navigate to where pages\npublishes by default and . . . well dang it.\n\n\n![404\npage](https://about.gitlab.com/images/blogimages/publish_a11y_reports/a11y-404.png){:\n.shadow}\n\nThat didn't go according to plan\n\n{: .note.text-center}\n\n\nAfter I ended the video I realized my mistake and made some changes to the\n.gitlab-ci.yml in order to publish the report in a cleaner fashion. Now\nafter moving the generated file to the public directory it is renamed\nindex.html. 
You can see this in the example project's [.gitlab-ci.yml\nfile](https://gitlab.com/jheimbuck_gl/my-static-website/-/blob/master/.gitlab-ci.yml).\nYou can see the latest report\n[here](https://jheimbuck_gl.gitlab.io/my-static-website/).\n\n\n## Summary\n\n\nSo I spent an hour and a half of wall clock time I got it all working which\nwasn't great but overall not bad since I hadn't tried it before. As I said\nin the video I thought a blog would help explain some of the issues I ran\ninto and help you get this setup done quicker. I hope this post has inspired\nyou to add an accessibility job to your existing Gitlab pipeline and maybe\neven post that report to a Pages site so it is available for more of your\nteam to use.\n",[9,695],{"slug":4484,"featured":6,"template":700},"publishing-a11y-reports-in-gitlab-pages","content:en-us:blog:publishing-a11y-reports-in-gitlab-pages.yml","Publishing A11y Reports In Gitlab Pages","en-us/blog/publishing-a11y-reports-in-gitlab-pages.yml","en-us/blog/publishing-a11y-reports-in-gitlab-pages",{"_path":4490,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4491,"content":4497,"config":4503,"_id":4505,"_type":14,"title":4506,"_source":16,"_file":4507,"_stem":4508,"_extension":19},"/en-us/blog/python-rust-and-gitlab-ci",{"title":4492,"description":4493,"ogTitle":4492,"ogDescription":4493,"noIndex":6,"ogImage":4494,"ogUrl":4495,"ogSiteName":685,"ogType":686,"canonicalUrls":4495,"schema":4496},"From idea to production with Python, Rust and GitLab CI","GitLab hero Mario Garcia demos the intricate process at GitLab Commit London.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678507/Blog/Hero%20Images/lightbulb.jpg","https://about.gitlab.com/blog/python-rust-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Bringing your application from idea to production using Python, Rust, and GitLab CI\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-11-15\",\n      }",{"title":4498,"description":4493,"authors":4499,"heroImage":4494,"date":4500,"body":4501,"category":300,"tags":4502},"Bringing your application from idea to production using Python, Rust, and GitLab CI",[1245],"2019-11-15","During his talk at GitLab Commit London, GitLab Hero [Mario\nGarcía](https://gitlab.com/mattdark), explains how he troubleshooted his way\nthrough numerous roadblocks to take his Firebase application from\ndevelopment to production using Rust, Python and GitLab CI.\n\n\n## Rewriting from Python to Rust\n\n\n### What is Rust?\n\n\nWhile Python is a household name among developers, Rust is the new kid on\nthe block when it comes to a systems programming language.\n\n\n[Rust](https://www.rust-lang.org/) was developed by Mozilla is giving to the\nworld, it's been in development since 2009 with a first stable version\nreleased in May 2015 and it aims to improve memory usage while maintaining\nperformance and speed. Mario, who is a Mozilla representative, dedicated\nhimself to learning Rust in late 2015. He started this journey by reading\nthe Rust book, [solving programming\nexercises](https://exercism.io/tracks/rust), migrating Python code to Rust,\nand then rewriting one of his [personal projects, a gallery for reveal.js\npresentations, in Rust](https://gitlab.com/mattdark/reveal-js-gallery).\n\n\nReveal-js is a framework for creating presentations using HTML, and allows\nthe user to store speaker notes, images, and more in a presentation gallery.\nMario first wrote his gallery app in Python but migrated the project into\nRust while he was learning the new language and found the process to be\nrelatively painless. 
But it wasn’t long before Mario hit a bump in the road\nwhen it came to using Rust for other projects.\n\n\n### Problems with Rust\n\n\n“I was working on another project that I applied to the Mozilla Open Leaders\nprogram two years ago,” said Mario. “And for this project I was using [Cairo\nSVG Python library](https://cairosvg.org/). I needed this specific library\nbecause I was converting SVG files to PDF. So that's how I found out that it\nwas _impossible_ to rewrite this specific part with Rust because there is no\nalternative available in Rust for this library.”\n\n\nNot only did Rust lack an alternative to the CairoSVG Python library, but\nthere was also no crates (Rust libraries) for Firebase. Mario needed\nFirebase for his project that takes the database of speaker information and\nautomatically generates certificates of participation.\n\n\nMario was presenting an example of a web app at Google I/O Extended on how\nto use Rust and Firebase with web apps. But there was no functional library\nin Rust that could connect with Firebase and retrieve data from the\ndatabase.\n\n\nMario came up with a solution: use Python.\n\n\n_More of a video person? 
Watch Mario’s entire presentation from GitLab\nCommit London in the video below, or follow along step by step in this blog\npost._\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/BYfJBa_79Xo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## Using Python and Rust together\n\n\nIn his presentation at GitLab Commit London, Mario demonstrated how he\nmanaged to build a Firebase web application in Rust using Python, and deploy\nit using GitLab CI so fellow GitLab users can try to replicate his process,\nor get some input if they're also having difficulties.\n\n\n### Configure your environment\n\n\nThe first step is to make sure that your environment is properly configured.\nTo use both Python, Rust, and GitLab CI, you’ll need the following on your\nmachine:\n\n\n*   Git\n\n*   [GCC](https://crates.io/crates/gcc)\n    *   Rust needs a C compiler and Cargo, which is the package manager for Rust projects\n*   Rust\n    *   Nightly mode for this project\n    *   Cargo\n*   Python 3.5+\n    *   [pipenv](https://github.com/pypa/pipenv) for managing dependencies\n\nInstall Rust using [Rustup](https://rustup.rs/) by typing the code below\ninto your terminal.\n\n\n`curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`\n\n\nYou’ll also need to install bindings to run Python code directly from Rust,\nand that will also help with writing Python models using Rust code. Mario\nrecommends [CPython](https://crates.io/crates/cpython)and\n[Py03](https://crates.io/crates/pyo3), but used CPython in this demo.\n\n\n### Kick-start your project\n\n\nNext, Mario describes the general process for creating a project using\nPython and Rust.\n\n\nCargo is a package manager for Rust projects, and will create a Cargo.toml\nfile and src/ directory when its run. 
The Cargo.toml file is the manifest\nfor the application and includes the dependencies the project requires.\nWithin the src/ file is a [main.rs\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/src/main.rs)\nthat contains an example of a Rust application.\n\n\nThe next step is to move through the src/ directory Cargo created to set up\nthe default toolchain for the project.\n\n\n```ruby\n\n[package]\n\nname = \"firebase_sample\"\n\nversion = \"0.1.0\"\n\nauthors = [\"mattdark\"]\n\nedition = \"2018\"\n\n[dependencies]\n\ncpython = \"0.3\"\n\nserde = \"1.0.99\"\n\nserde_derive = \"1.0.99\"\n\nserde_json = \"1.0.40\"\n\nrocket = \"0.4.2\"\n\n[dependencies.rocket_contrib]\n\nversion = \"0.4\"\n\nfeatures = [\"handlebars_templates\"]\n\n```\n\n\nThe Cargo.toml file will show the name of the application, the version,\nauthors etc. And if you’re working on Linux, it will take the user of your\nsystem and put it as the author of the project.\n\n{: .note}\n\n\n“The dependencies that we need for the project are CPython for the Python\npart, [Serde](https://serde.rs/), which is a library that help us with\nreading information for files like JSON, and Rocket, which is a web\nframework for Rust,” said Mario.\n\n\nNext, set the [Nightly version of\nRust](https://doc.rust-lang.org/1.2.0/book/nightly-rust.html) as the default\ntoolchain for the project.\n\n\nAdd a ‘python’ directory to src/ directory, where you’ll be adding the\nPython models required for this project to this directory.\n\n\nOnce the src/python is set-up, add the Pipfile or [requirements.txt\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/requirements.txt)\nfor the dependencies of the Python module to the directory.\n\n\n```ruby\n\n[[source]]\n\nname = \"pypi\"\n\nurl = \"https://pypi.org/simple\"\n\nverify_ssl = true\n\n[dev-packages]\n\n[packages]\n\nfirebase = \"*\"\n\npython-jwt = \"*\"\n\ngcloud = \"*\"\n\nsseclient = \"*\"\n\npycrypto = \"*\"\n\nrequests-toolbelt = 
\"*\"\n\n[requires]\n\npython_version = \"3.7.3\"\n\n```\n\n\nThe Pipfile is an example of a project used for Firebase. Included here is\nall the dependencies we need for Firebase in the file, as well as the Python\nversion in use.\n\n{: .note}\n\n\nNext write the Rust code in src/main.rs and add the Python scripts in\nsrc/python.\n\n\n### Writing the Python code\n\n\nMario’s Firebase application is designed to rake a database of speaker\ninformation and automatically generate certificates of participation in PDF\nformat.\n\n\n```\n\n{\n  \"slides\" : {\n    \"privacymatters\" : {\n      \"description\" : \"Talk about privacy & security\",\n      \"file\" : \"privacy-matters.md\",\n      \"id\" : \"2\",\n      \"screenshot\" : \"/img/screenshot/privacy-matters.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Why Privacy Matters?\",\n      \"url\" : \"privacy-matters\"\n    },\n    \"rust101\" : {\n      \"description\" : \"Introduction to Rust\",\n      \"file\" : \"rust-101.md\",\n      \"id\" : \"1\",\n      \"screenshot\" : \"/img/screenshot/rust-101.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Rust 101\",\n      \"url\" : \"rust-101\"\n    },\n    \"rustrocket\" : {\n      \"description\" : \"Building Web Apps with Rust + Rocket\",\n      \"file\" : \"rust-rocket.md\",\n      \"id\" : \"3\",\n      \"screenshot\" : \"/img/screenshot/rust-rocket.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Rust + Rocket\",\n      \"url\" : \"rust-rocket\"\n    },\n    \"whyrust\" : {\n      \"description\" : \"What is Rust and Why Learn it?\",\n      \"file\" : \"why-rust.md\",\n      \"id\" : \"4\",\n      \"screenshot\" : \"/img/screenshot/why-rust.png\",\n      \"theme\" : \"mozilla.css\",\n      \"title\" : \"Why Rust?\",\n      \"url\" : \"why-rust\"\n    }\n  }\n}\n\n```\n\n{: .language-ruby}\n\n\nInformation about Mario’s Firebase application lives in this JSON file of\nthe Firebase database.\n\n{: .note}\n\n\nThe 
application is written in Rust, and therefore needed a Firebase\nconnector. But since the is not a functional Firebase crate, Mario had to\nthink outside the box and use the Python library.\n\n\n```\n\nimport json\n\nfrom firebase import Firebase\n\ndef read_data(self):\n    config = {\n        \"apiKey\": \"APIKEY\",\n        \"authDomain\": \"fir-speakers.firebaseapp.com\",\n        \"databaseURL\": \"https://fir-speakers.firebaseio.com\",\n        \"projectId\": \"fir-speakers\",\n        \"storageBucket\": \"\",\n        \"messagingSenderId\": \"MESSAGINGSENDERID\"\n    }\n    firebase = Firebase(config)\n    speaker = list()\n    db = firebase.database()\n    all_speakers = db.child(\"speakers\").get()\n    for x in all_speakers.each():\n        speaker.append(x.val())\n    s = json.dumps(speaker)\n    return s\n```\n\n{: .language-ruby}\n\n\n“For the Python part of the project, we have to connect to the Firebase\ndatabase, retrieve the data and save it to a variable that later we will\nconvert to JSON so that Rust can correctly rake the data and pass it to the\nHTML5,” said Mario.\n\n\n### Troubleshooting\n\n\nThere was a profound lack of documentation about how to use Rust and Python\ntogether to build a Firebase application, and Mario ran into even more\nhurdles as he tried to troubleshoot.\n\n\nThe two major problems that he was trying to solve were:\n\n\n*   Calling a Python script (.py) from Rust\n\n*   Passing a value from Rust to Python\n\n\n“In the Github repositories for these projects – well at least for the\nlibrary that I'm using – there is no information about how you can do those\ntasks,” said Mario.\n\n\nAfter hours of researching and testing, he discovered a solution.\n\n\n### Building the Project\n\n\nMario was able to run the Python script from Rust and execute the function\nthat connects to the Firebase database. 
Once connected to the Firebase\ndatabase, the process will retrieve the data and funnel it back to Rust as\nJSON.\n\n\n![Rust\ncode](https://about.gitlab.com/images/blogimages/python_and_rust_post/rust-code.jpg){:\n.shadow.medium.center}\n\n\nAfter some troubleshooting, Mario discovered the proper code to run in Rust\nto bridge the gap between Rust and the Firebase application.\n\n{: .note-text.center}\n\n\nNext, the Rust code will convert the values into a HashMap, and pass that\ninformation to an HTML file.\n\n\nNow that the project is built, it’s time to run it using:\n\n\n```\n\ncargo run\n\npipenv run cargo run\n\n```\n\nTo see your project type `localhost:8000` into the web browser.\n\nThe result should look similar to what you see here and in the [GitLab\nproject](https://gitlab.com/mattdark/reveal-js-gallery).\n\n\n![GitLab project\npreview](https://about.gitlab.com/images/blogimages/python_and_rust_post/gitlabproject.jpg){:\n.shadow.medium.center}\n\n\n## Deploying the application with GitLab CI\n\n\n### Dockerize the application\n\n\nTo configure for GitLab CI, Mario had to choose a Docker image for running\nthe test and deployment. There is a custom Docker image for Rust that can be\ncustomized to fit the specific version for Rust, which in this case is Rust\nNightly.\n\n\n`rustlang/rust:nightly`\n\n\n“The problem is that the Python version that is installed in these Docker\nimage is based on Debian image itself, so we need pipenv and we need other\ntools to be installed,” said Mario.\n\n\nSo Mario customized the Docker image and generated a second one that has the\npipenv components.\n\n\n### Create the repository\n\n\nNow that the Docker images are configured for the application, it’s time to\ncreate the repository and upload the code using the Terminal or GitKraken.\n\n\nThe next – and arguably the most important – step in the process is\n**documentation**. 
Mario urges all users to upload any and all relevant\nfiles to the repository, such as the README, LICENSE, CODE_OF_CONDUCT.md,\netc.\n\n\nOnce the necessary files are uploaded into the repository, it’s time to\nstart configuring for GitLab CI.\n\n\nMario recommends using Gitignore.io to the .gitignore file for the\ntechnologies being used for the project (in this case, Rust or Python).\nThere are three key files that need to be written to configure the pipelines\nrequired for running GitLab CI:\n\n\n*   **Procfile**: A way to tell a platform like Heroku what is the binary\nfile for the project. Since the project is being developed with Rust, it\nwill generate a binary file that needs to be executed.\n\n*   **RustConfig**: Contains the version of Rust we are using for the\nproject.\n\n*   **Rocket.toml**: Can be used to specify the configuration parameters for\nthe environment.\n\n\nYou can find examples of these files in the [Firebase example project on\nGitLab](https://gitlab.com/mattdark/firebase-example/tree/prod).\n\n\n### GitLab CI\n\n\nAll of these efforts go into preparing the application for deployment using\nGitLab CI. Deployment with GitLab CI is simple, because each stage of the\ndeployment process lives in a yaml file. [Mario’s gitlab-ci.yml\nfile](https://gitlab.com/mattdark/firebase-example/blob/master/.gitlab-ci.yml)\nonly includes the build and production stages, but [more comprehensive\ninformation about GitLab CI is available\nhere](https://docs.gitlab.com/ee/ci/).\n\n\n## Document, document, document\n\n\nThe lack of documentation created significant delays for Mario as he tried\nto get his Firebase application off the ground. While in this case the\ninformation he required was difficult to track down even in English, there\nare even more substantial barriers for non-native English speakers or\nnon-English speaking programmers.\n\n\n>>“I'm from Mexico, so I'm living in a Spanish-speaking country and I\nstarted learning English 15 years ago. 
That means that I'm in a privileged\nposition. When we are writing the documentation sometimes, we forget that\nnot many people have the opportunity to learn English,” said Mario. “I'm\ntalking about English because most of the information and documentation of\ntechnologies that are available in this language. So if we live in a\nnon-English speaking country, don't forget to write the documentation in our\nnative language.”\n\n\nHis comments resonated strongly with the GitLab Commit London audience.\n\n\n{::options parse_block_html=\"false\" /}\n\n\n\u003Cdiv class=\"center\">\n\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">This is so\nimportant for accessibility.\u003Cbr>\u003Cbr>Same goes for filling documentation full\nof jargon and marketing terms.\u003Cbr>\u003Cbr>Documentation is there to inform those\nwho don&#39;t have the knowledge, presuming knowledge just furthers a toxic\nculture of gatekeeping. \u003Ca\nhref=\"https://t.co/k7EILtHuvy\">pic.twitter.com/k7EILtHuvy\u003C/a>\u003C/p>&mdash;\nMatt Smith (@Harmelodic) \u003Ca\nhref=\"https://twitter.com/Harmelodic/status/1181946002720411648?ref_src=twsrc%5Etfw\">October\n9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async\nsrc=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\n\u003C/div>\n\n\nJoin us at GitLab Commit San Francisco to hear about the innovative ways\nusers like Mario are using GitLab and other open source technologies!\nRegistration information is available below.\n\n\nCover image by [Jack Carter](https://unsplash.com/@carterjack) on\n[Unsplash](https://unsplash.com/s/photos/lightbulb).\n\n{: .note}\n",[9,268,763],{"slug":4504,"featured":6,"template":700},"python-rust-and-gitlab-ci","content:en-us:blog:python-rust-and-gitlab-ci.yml","Python Rust And Gitlab 
Ci","en-us/blog/python-rust-and-gitlab-ci.yml","en-us/blog/python-rust-and-gitlab-ci",{"_path":4510,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4511,"content":4517,"config":4522,"_id":4524,"_type":14,"title":4525,"_source":16,"_file":4526,"_stem":4527,"_extension":19},"/en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"title":4512,"description":4513,"ogTitle":4512,"ogDescription":4513,"noIndex":6,"ogImage":4514,"ogUrl":4515,"ogSiteName":685,"ogType":686,"canonicalUrls":4515,"schema":4516},"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform","Use this tutorial as a great starting point to manage your cluster entirely through GitOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665989/Blog/Hero%20Images/AdobeStock_618473457.jpg","https://about.gitlab.com/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick setup of a GKE Cluster with ArgoCD pre-installed using Terraform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2024-01-31\",\n      }",{"title":4512,"description":4513,"authors":4518,"heroImage":4514,"date":3050,"body":4520,"category":718,"tags":4521},[1957,4519],"Siddharth Mathur","This tutorial will walk you through setting up a Google Kubernetes Engine\n(GKE) Cluster with ArgoCD pre-installed, utilizing Terraform, in less than\n10 minutes. 
This will be a great starting point to manage your cluster\nentirely through GitOps.\n\n\n### Prerequisites\n\n- GCP account with permissions to provision a GKE Cluster\n\n- Kubectl client v1.23.9\n\n- Kubectl server v1.23.16-gke.1400\n\n- Working knowledge of GKE\n\n- Basic knowledge of ArgoCD\n\n\n#### An overview of this tutorial is as follows:\n\n- Set up the GitLab Terraform GKE ArgoCD Template \n\n- Connect to your GKE Cluster\n\n- Grab the ArgoCD Initial Admin Secret\n\n- Log into ArgoCD \n\n- Enjoy your Kubernetes Cluster with ArgoCD!\n\n\n#### Set up the GitLab Terraform GKE ArgoCD template\n\n\nStart by importing the example project by URL -\n[https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project).\n\n\nTo import the project:\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all\nprojects**.\n\n2. On the right of the page, select **New project**.\n\n3. Select **Import project**.\n\n4. Select **Repository by URL**.\n\n5. For the Git repository URL:\n    - [GitLab Terraform GKE ArgoCD](https://gitlab.com/demos/infrastructure/gitlab-terraform-gke-argocd)\n6. Complete the fields and select **Create project**.\n\n\n#### Add in your cloud credentials to CI/CD variables\n\n\n1. To authenticate GCP with GitLab, create a GCP service account with the\nfollowing roles: **Compute Network Viewer, Kubernetes Engine Admin, Service\nAccount User, and Service Account Admin**. Both User and Admin service\naccounts are necessary. The User role impersonates the default service\naccount when creating the node pool. The Admin role creates a service\naccount in the kube-system namespace.\n\n2. **Download the JSON file** with the service account key you created in\nthe previous step.\n\n3. On your computer, encode the JSON file to base64 (replace\n/path/to/sa-key.json to the path to your key):\n\n\n```\n\nbase64 -i /path/to/sa-key.json\n\n```\n\n\n4. 
Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS**\nenvironment variable in the next step.\n\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON\nfile you just created.\n\n6. Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n\n\n![simpleargocd - image\n1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_1.png)\n\n\n#### Run GitLab CI to deploy your Kubernetes cluster with ArgoCD Installed.\n\n\n![simpleargocd - image\n2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_2.png)\n\n\n#### Connect to your GKE Cluster through your terminal using the following\nbash command.\n\n\n```bash\n\ngcloud container clusters get-credentials gitlab-terraform-gke-argocd\n--region us-central1 --project \u003Cproject-name>\n\n```\n\n\n![simpleargocd-image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd-image3.png)\n\n\n#### Expose the Initial Admin Secret through your terminal using the\nfollowing bash command. Make sure you save this password for later.\n\n\n```bash\n\nkubectl -n argocd get secret argocd-initial-admin-secret -o\njsonpath=\"{.data.password}\" | base64 -d\n\n```\n\n\n#### Port Forward ArgoCD to your localhost 8080 through your terminal using\nthe following bash command. Go to Chrome localhost:8080 afterwards.\n\n\n```bash\n\nkubectl port-forward svc/argocd-server -n argocd 8080:443\n\n```\n\n\n#### Enter your admin and `Initial Admin Secret` to the login page.\n\n\n![simpleargocd - image\n4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_4.png)\n\n\n#### Voila! You've bootstrapped your GKE cluster with ArgoCD. 
Enjoy your\nGitOps!\n\n\n![simpleargocd - image\n5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683298/Blog/Content%20Images/simpleargocd_-_image_5.png)\n\n\n### Next steps\n\nWe recommend reviewing [setting up Review Ops with\nArgoCD](https://about.gitlab.com/blog/how-to-provision-reviewops/)! \n\n\n### References\n\n- [GitLab Learn Labs - Infrastructure\nWebinar](https://gitlab.com/gitlab-learn-labs/webinars/infrastructure/gitlab-terraform-gke-argocd)\n\n- [Getting started with\nArgoCD](https://argo-cd.readthedocs.io/en/release-2.0/getting_started/)\n\n\n### Related posts\n\n- [Simple Kubernetes management with\nGitLab](https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab/)\n\n- [How to provision\nReviewOps](https://about.gitlab.com/blog/how-to-provision-reviewops/)\n\n- [The ultimate guide to GitOps with\nGitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n",[549,2619,9,696,1127],{"slug":4523,"featured":6,"template":700},"quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform","content:en-us:blog:quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","Quick Setup Of A Gke Cluster With Argocd Pre Installed Using Terraform","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform.yml","en-us/blog/quick-setup-of-a-gke-cluster-with-argocd-pre-installed-using-terraform",{"_path":4529,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4530,"content":4536,"config":4541,"_id":4543,"_type":14,"title":4544,"_source":16,"_file":4545,"_stem":4546,"_extension":19},"/en-us/blog/quick-start-guide-for-gitlab-workspaces",{"title":4531,"description":4532,"ogTitle":4531,"ogDescription":4532,"noIndex":6,"ogImage":4533,"ogUrl":4534,"ogSiteName":685,"ogType":686,"canonicalUrls":4534,"schema":4535},"Quickstart guide for GitLab Remote Development workspaces","Learn how to create a workspace from your GitLab account and work directly from the remote 
development environment.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664219/Blog/Hero%20Images/2023-06-22-quickstart-workspaces-cover-image2.png","https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickstart guide for GitLab Remote Development workspaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":4531,"description":4532,"authors":4537,"heroImage":4533,"date":4538,"body":4539,"category":718,"tags":4540},[1835],"2023-06-26","GitLab 16.0 introduced [Remote Development workspaces\n(beta)](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects),\nan exciting addition to the GitLab platform that empowers teams to build and\ndeliver software more efficiently.\n\n\nThis guide provides step-by-step instructions on how to create a workspace\ndirectly from your GitLab account and work directly from the remote\ndevelopment environment. You will work in the Web IDE, a Visual Studio Code\nbrowser version, seamlessly integrated into the workspace. \n\n\nFrom this quick start, you will learn how to create a workspace, use the Web\nIDE Terminal to install dependencies or start your server, and view your\nrunning application. 
\n\n\nTo learn more about Remote Development in GitLab, we recommend reading this\ninformative blog post, \"[A first look at\nworkspaces](https://about.gitlab.com/blog/introducing-workspaces-beta/),\"\nand the [workspaces docs](https://docs.gitlab.com/ee/user/workspace/).\n\n\nHere are the steps covered in this tutorial:\n\n\n- [Prerequisites](#prerequisites)\n\n- [Locate DevFile at the root of\nrepository](#locate-devfile-at-the-root-of-repository)\n\n- [Create your workspace](#create-your-workspace)\n\n- [Install dependencies and previewing your application in the\nworkspace](#install-dependencies-and-previewing-your-application-in-the-workspace)\n\n- [Make changes to the application and previewing the updated\nversion](#make-changes-to-the-application-and-previewing-the-updated-version)\n\n- [Commit the change](#commit-the-change)\n\n- [Explore the demo](#explore-the-demo)\n\n- [Try out workspaces](#try-out-workspaces)\n\n\n## Prerequisites \n\nPrior to enabling developers to create workspaces, there are a few\nprerequisites such as bring your own Kubernetes cluster, and install and\nconfigure the GitLab agent for Kubernetes on it. Additionally, certain\nconfiguration steps must be completed on the cluster. You can find detailed\ninstructions for all these steps in [our workspaces prequisites\ndocumentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#prerequisites).\nOnce the prerequisites are properly configured, developers who hold\nDeveloper role or above within the root group will gain the ability to\ncreate workspaces.\n\n\n## Locate DevFile at the root of repository\n\nA [devfile](https://devfile.io/docs/2.2.0/devfile-ecosystem) is a\ndeclarative configuration file, in YAML syntax, used to define and describe\nthe development environment for a software project. 
It provides a\nstandardized way to specify the necessary tools, languages, runtimes, and\nother components required for developing an application.\n\n\nTo initiate a workspace, it is necessary to have a devfile located at the\nroot of the repository. In this blog post, we will utilize a project that\ncontains a devfile, accessible\n[here](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/raw/main/.devfile.yaml). \n\n\n```yaml\n\nschemaVersion: 2.2.0\n\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NOTE: THIS IMAGE EXISTS ONLY FOR DEMO PURPOSES AND WILL NOT BE MAINTAINED\n      image: registry.gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/debian-bullseye-ruby-3.2-node-18.12:rubygems-3.4-git-2.33-lfs-2.9-yarn-1.22-graphicsmagick-1.3.36-gitlab-workspaces\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n      - name: http-3000\n        targetPort: 3000\n```\n\nFor more information, see the [GitLab\ndocumentation](https://docs.gitlab.com/ee/user/workspace/#devfile) and\n[devfile documentation](https://devfile.io/docs/2.2.0/devfile-schema).\n\n\n## Create your workspace \n\n1. Make sure you have a [Developer role or\nabove](https://docs.gitlab.com/ee/user/permissions.html) in the root group,\nand the above prerequisites configured properly.\n\n2. Fork [this\nproject](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app)\nto the GitLab group for which you have a Developer role or above. \n\n3. Switch contexts and select `Your work`.\n\n![Your\nwork](https://about.gitlab.com/images/blogimages/2023-07-10-your-work.png){:\n.shadow}\n\n4. Select `Workspaces`.\n\n5. Select `New workspace`.\n\n6. Select the project you forked or another project that has a\n`.devfile.yaml` file at the root of the repository. \n\n7. 
Select the [cluster\nagent](https://docs.gitlab.com/ee/user/workspace/#prerequisites) owned by\nthe group the project belongs to.\n\n8. In `Time before automatic termination`, enter the number of hours until\nthe workspace automatically terminates. This timeout is a safety measure to\nprevent a workspace from consuming excessive resources or running\nindefinitely. \n\n9. Select `Create workspace`. \n\n\n![create\nws](https://about.gitlab.com/images/blogimages/create_workspace.png){:\n.shadow}\n\n\nThe workspace will be deployed to the cluster and might take a few minutes\nto start. To access the workspace, under Preview, select the workspace link.\n\n\n![ws list](https://about.gitlab.com/images/blogimages/workspaces_list.png){:\n.shadow}\n\n\n## Install dependencies and previewing your application in the workspace\n\nAfter creating your workspace, the [Web IDE using VS\nCode](https://docs.gitlab.com/ee/user/workspace/#web-ide) is injected into\nit, and the repository is cloned to the image. Consequently, you gain\nimmediate access to your code and can commence working on it right away.\n\n\nYou can now open the terminal, install any missing dependencies, and start\nthe application.\n\n\n![Terminal](https://about.gitlab.com/images/blogimages/ws-terminal.png){:\n.shadow}\n\n\n1. To open the terminal, from the left menu, select `Terminal`, `New\nTerminal`. \n\n2. Type `npm install` to install the dependencies listed in the\n[package.json](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/blob/main/package.json)\nfile.\n\n3. Type `npm start` to start the application.\n\n\nThe log will indicate that the application has started on port 3000.\n\n\n![log](https://about.gitlab.com/images/blogimages/server_log.png){: .shadow}\n\n\nYou can now access your application by opening the browser and using the\nworkspace URL. Change the number before ‘workspace’ in the URL to the port\nnumber on which your application is listening (e.g., 3000). 
For example, if\nyour workspace URL is\n`https://\u003Cprefix>-workspace-73241-25728545-rqvpjm.workspaces.gitlab.dev`,\nand your application is running on port 3000, update `\u003Cprefix>` to 3000 to\naccess your application.\n\n\n## Make changes to the application and previewing the updated version\n\nIn the Web IDE, navigate to the `server.js` file, modify the text in line\n9. \n\n\nAfterward, refresh the browser where your application is opened to see the\napplied changes. \n\n\n## Commit the change \n\n1. In the Web IDE click on the merge icon in the activity bar.\n\n2. Click the line with the `server.js` to view your change side by side.\n\n3. To stage your change, click the plus icon next to `server.js`.\n\n4. Type a commit message describing your change.\n\n5. Click Commit. \n\n6. Click Sync changes to push the commit to the GitLab server.\n\n  ![commit](https://about.gitlab.com/images/blogimages/commit-stage.png){: .shadow}\n\n## Explore the demo \n\nExplore further with this [click-through demo of\nworkspaces](https://go.gitlab.com/qtu66q).\n\n\n## Try out workspaces\n\nRemote Development workspaces offer a convenient and efficient way to work\non projects without the need for local development setups. They provide a\nstreamlined workflow and enable developers to focus on writing code rather\nthan dealing with complex environment setups.\n\n\nBy adopting workspaces, developers can collaborate effectively, improve\nproductivity, and simplify the development process. 
\n\n\nGive workspaces a try and revolutionize your remote development experience\ntoday!\n\n\nCover image by \u003Ca\nhref=\"https://unsplash.com/@pankajpatel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Pankaj\nPatel\u003C/a> on \u003Ca\nhref=\"https://unsplash.com/photos/_SgRNwAVNKw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n\n{: .note}\n",[917,721,9],{"slug":4542,"featured":6,"template":700},"quick-start-guide-for-gitlab-workspaces","content:en-us:blog:quick-start-guide-for-gitlab-workspaces.yml","Quick Start Guide For Gitlab Workspaces","en-us/blog/quick-start-guide-for-gitlab-workspaces.yml","en-us/blog/quick-start-guide-for-gitlab-workspaces",{"_path":4548,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4549,"content":4555,"config":4561,"_id":4563,"_type":14,"title":4564,"_source":16,"_file":4565,"_stem":4566,"_extension":19},"/en-us/blog/r2devops-open-source-hub-cicd",{"title":4550,"description":4551,"ogTitle":4550,"ogDescription":4551,"noIndex":6,"ogImage":4552,"ogUrl":4553,"ogSiteName":685,"ogType":686,"canonicalUrls":4553,"schema":4554},"How to create a hub of GitLab CI/CD jobs with R2Devops","Here's how R2Devops and GitLab can work together to streamline CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682395/Blog/Hero%20Images/r2devops1.png","https://about.gitlab.com/blog/r2devops-open-source-hub-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a hub of GitLab CI/CD jobs with R2Devops\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Salerno\"}],\n        \"datePublished\": \"2022-07-27\",\n      }",{"title":4550,"description":4551,"authors":4556,"heroImage":4552,"date":4558,"body":4559,"category":1062,"tags":4560},[4557],"Sandra Salerno","2022-07-27","\n\nCI/CD has changed our development processes, but it hasn’t simplified them in every 
aspect. The amount of knowledge necessary to implement and maintain your first CI/CD pipelines is huge, and the time you need to invest in it is consequential. Partnering with GitLab, R2Devops aims to simplify CI/CD onboarding by creating a hub of CI/CD jobs. In this blog post I'll show you how to use R2DevOps with GitLab to add jobs to an open source hub.\n\n## A collaborative hub of open source jobs\n\nCollaboration is core to our development processes. On a daily basis, we use open source software and code and ask our teammates for review. Working together to achieve common goals helps us to develop better products and improve continuously. With R2Devops, you’ll find a [collaborative library of open source CI/CD jobs](https://r2devops.io/_/jobs). \n\nYou can save a lot of time by using jobs from an open source library. You won’t have to write your pipeline from scratch for every new project, and you can focus on what you like doing, which is coding.\n\nAnd, of course, working together is working smarter. R2Devops empowers collaboration by allowing developers to add their own jobs into the library directly from their GitLab account. \n\n## How to add a job in R2Devops\n\n![Adding a job](https://about.gitlab.com/images/blogimages/r2devops2.gif){: .shadow.small.left}\n\nLink your GitLab account to [R2Devops](https://r2devops.io), fill in the URL of your repository, the path of your job, and give it a name. Once you click on import, our crawler will check three files:\n\n1.) the jobname.yml/jobname.yaml \n\n2.) the changelog.md\n\n3.) the readme.md. \n\nThe crawler process is explained in detail [in our documentation](https://docs.r2devops.io/crawler/). In short, without a jobname.yml file, R2 won’t be able to import your job. 
The changelog.md allows R2 to check your job’s versions, and the readme.md is used to build the documentation for each version of your job.\n\nEt voilà, anyone can see your job in R2Devops and easily use it in their pipeline.\n\nOnce your job is in R2Devops, you can add information such as the license, description, and specify labels. This helps other users understand what your job can be used for. That data and the job’s code appears in the documentation. 👇\n\n![Data in the documentation](https://about.gitlab.com/images/blogimages/r2devops3.png){: .shadow.small.left}\n\n### Include any jobs in one line with GitLab Include keyword feature\n\n[In January 2019, GitLab released a feature](https://about.gitlab.com/releases/2019/01/22/gitlab-11-7-released/) that simplifies the CI/CD keyword [Include](https://docs.gitlab.com/ee/ci/yaml/index.html#include) process. Rather than copying the code of a job every time you need to create a new pipeline, you can instead indicate to your pipeline where the source is located.\n\nFor example, this:\n\n![pre-include](https://about.gitlab.com/images/blogimages/r2devops4.png){: .shadow.small.left}\n\ncan become the below:\n\n![post-include](https://about.gitlab.com/images/blogimages/r2devops5.png){: .shadow.small.left}\n\nThis feature is used in R2Devops. Every resource added in the library gets its own _Include_ link, so anyone can implement it in one line in their CI/CD. It also means that the file you are using is located in a unique place. Once you update it, you only have to update the include link by modifying the version of the job you want to use. You don’t have to update the whole code in every pipeline you own.\n\n### Customize the job you need using GitLab variables\n\nMost of R2Devops’ jobs are plug and play, meaning you can add the _Include_ link of the job in your pipeline, launch it, and it will work. We understand every project is different and has its own requirements, which is why we defined variables for each job. 
\n\n[GitLab CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and YAML overrides allow you to customize the jobs and make them fit your project easily. \n\n![How to customize](https://about.gitlab.com/images/blogimages/r2devops6.png){: .shadow.small.left}\n\nWe have included two jobs from the hub as examples: [python_test](https://r2devops.io/_/r2devops-bot/python_test) code and[sls-scan](https://r2devops.io/_/r2devops-bot/sls_scan). Using the variables defined in the documentation for each job, you can personalize the behavior of these jobs to fit our project requirements.\n\n## Matching GitLab's values of open source and transparency\n\nR2Devops joined the [GitLab Alliance Partner Program](/handbook/alliances/) in March. Both solutions share the same goal – to simplify developer lives by improving development processes. If you want to take part in the development of the open source CI/CD community of GitLab or give feedback on the solution, please [join the R2Devops community on Discord.](https://discord.r2devops.io?utm_medium=website&utm_source=r2devops&utm_campaign=button https://discord.r2devops.io?utm_medium=gitlab&utm_source=blog&utm_campaign=articleR2Devops)\n\nCover image by [Duy Pham](https://unsplash.com/@miinyuii) on [Unsplash](https://unsplash.com)\n{: .note}\n\n\n",[9,721,283],{"slug":4562,"featured":6,"template":700},"r2devops-open-source-hub-cicd","content:en-us:blog:r2devops-open-source-hub-cicd.yml","R2devops Open Source Hub 
Cicd","en-us/blog/r2devops-open-source-hub-cicd.yml","en-us/blog/r2devops-open-source-hub-cicd",{"_path":4568,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4569,"content":4574,"config":4579,"_id":4581,"_type":14,"title":4582,"_source":16,"_file":4583,"_stem":4584,"_extension":19},"/en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"title":4570,"description":4571,"ogTitle":4570,"ogDescription":4571,"noIndex":6,"ogImage":4514,"ogUrl":4572,"ogSiteName":685,"ogType":686,"canonicalUrls":4572,"schema":4573},"Refactoring a CI/CD template to a CI/CD component","CI/CD components are the next generation of CI/CD templates, enhancing pipeline creation and maintenance. Learn how to transition from templates to components.","https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring a CI/CD template to a CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-03-04\",\n      }",{"title":4570,"description":4571,"authors":4575,"heroImage":4514,"date":4576,"body":4577,"category":718,"tags":4578},[1835],"2024-03-04","GitLab recently introduced [CI/CD components](https://docs.gitlab.com/ee/ci/components/) as the next generation of the traditional CI/CD templates, and a novel approach to constructing CI/CD pipelines. CI/CD components offer reusable pipeline configurations that can be customized using input parameters.\n\nAlthough GitLab continues to support templates, they come with certain drawbacks that are addressed by the introduction of components. Therefore, we highly recommend refactoring existing templates into CI/CD components.\n\nThis article will guide you through the steps of converting your current GitLab CI/CD templates into reusable CI/CD components. 
Prior familiarity with how to create CI/CD components is a prerequisite, which you can learn about in this blog post: [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/).\n\n## How to convert a template to a component\n\nThese are the steps to convert a CI/CD template to a CI/CD component:\n1. Create a component project if you don’t have one. \n2. Copy your existing templates to the ‘templates’ directory in the component project. \n3. For each template, review the jobs listed in it and assess whether you prefer to distribute them across different components or retain some or all within the same component. While it's possible to include multiple jobs in a single component, it's advisable to create components that perform minimal tasks. This approach enhances ease of reuse and flexibility.\n4. Create a new section at the top of the configuration for the input parameters and meta data using the `spec` keyword. \n5. Replace any custom CI/CD variables and any other hard-coded values with [inputs](https://about.gitlab.com/blog/use-inputs-in-includable-files/) to maximize flexibility for consumption. Consider parameterizing elements such as stage, image, job name/job prefix, etc. \n6. Follow the [best practices](https://docs.gitlab.com/ee/ci/components/index.html#best-practices) for components.\n7. 
Improve the configuration, for example by enabling [merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) or making it [more efficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html).\n\nHere is a code example of a job in an existing template:\n\n![existing template](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.05.25.png)\n\nAnd this is the refactored [component code](https://gitlab.com/components/aws/-/blob/main/templates/ec2-deploy-production.yml?ref_type=heads):\n\n![Converted component](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678077/Blog/Content%20Images/Screenshot_2024-03-03_at_12.07.14.png)\n\nOnce your components are ready, you can publish them to the CI/CD catalog so others will be able to find and consume them. \n\n## Take a product tour\n\nWe've prepared a brief product tour so you can quickly dive into the CI/CD catalog and see it in action (use the \"Next\" button to progress through the demo).\n\n[![Product tour of CI/CD catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.18.09.png)](https://gitlab.navattic.com/cicd-catalog)\n\n## Additional resources \n\nPlease refer to the official documentation on CI/CD components for more on how to [convert templates to components](https://docs.gitlab.com/ee/ci/components/#convert-a-cicd-template-to-a-component).\n\nYou can explore [an additional practical example](https://docs.gitlab.com/ee/ci/components/examples.html#cicd-component-migration-examples), demonstrating the steps to convert GitLab Go templates to CI/CD components.\n\nThen, you can watch the following video where [Fabio Pitino](https://about.gitlab.com/company/team/#fabiopitino), GitLab Principal Engineer, demonstrates the process of refactoring GitLab AWS templates to CI/CD components.\n\n\u003C!-- blank line 
-->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dGCPrIAuBmE?si=1vjG_aEziY5jn-YC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,696,917],{"slug":4580,"featured":91,"template":700},"refactoring-a-ci-cd-template-to-a-ci-cd-component","content:en-us:blog:refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","Refactoring A Ci Cd Template To A Ci Cd Component","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"_path":4586,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4587,"content":4593,"config":4599,"_id":4601,"_type":14,"title":4602,"_source":16,"_file":4603,"_stem":4604,"_extension":19},"/en-us/blog/registration-features-program-expands-by-16-free-features",{"title":4588,"description":4589,"ogTitle":4588,"ogDescription":4589,"noIndex":6,"ogImage":4590,"ogUrl":4591,"ogSiteName":685,"ogType":686,"canonicalUrls":4591,"schema":4592},"Registration Features program expands by 16 free features","More features now available at no cost to free self-managed Enterprise Edition DevSecOps platform customers who register and turn on their Service Ping.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659437/Blog/Hero%20Images/AdobeStock_398929148.jpg","https://about.gitlab.com/blog/registration-features-program-expands-by-16-free-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Registration Features program expands by 16 free features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ian Pedowitz\"}],\n        \"datePublished\": \"2024-01-18\",\n      }",{"title":4588,"description":4589,"authors":4594,"heroImage":4590,"date":4596,"body":4597,"category":693,"tags":4598},[4595],"Ian Pedowitz","2024-01-18","In GitLab 16.0 we 
[expanded](https://about.gitlab.com/blog/expanded-registration-features-program/) the [Registration Features program](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#registration-features-program), which offers free self-managed users running [GitLab Enterprise Edition](https://about.gitlab.com/enterprise/) free use of [paid features](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#available-features) by registering with GitLab and sending us activity data via Service Ping. In GitLab 16.5, 16.6, and 16.7 we’ve broadened the program to include the following 16 features:\n\n1. [Group wikis](https://docs.gitlab.com/ee/user/project/wiki/group.html): If you use GitLab groups to manage multiple projects, some of your documentation might span multiple groups. You can create group wikis, instead of [project wikis](https://docs.gitlab.com/ee/user/project/wiki/index.html), to ensure all group members have the correct access permissions to contribute.\n1. [Issue analytics](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html): Issue analytics is a bar graph that illustrates the number of issues created each month. The default time span is 13 months, which includes the current month, and the 12 months prior. Issue analytics is available for projects and groups.\n1. [Custom text in emails](https://docs.gitlab.com/ee/administration/settings/email.html#custom-additional-text): You can add additional text at the bottom of any email that GitLab sends. This additional text can be used for legal, auditing, or compliance reasons.\n1. [Contribution analytics](https://docs.gitlab.com/ee/user/group/contribution_analytics/index.html): Contribution analytics provide an overview of the [contribution events](https://docs.gitlab.com/ee/user/profile/contributions_calendar.html#user-contribution-events) made by your group’s members.\n1. 
[Group file templates](https://docs.gitlab.com/ee/user/group/manage.html#group-file-templates): Use group file templates to share a set of templates for common file types with every project in a group. It is analogous to the [instance template repository](https://docs.gitlab.com/ee/administration/settings/instance_template_repository.html).\n1. [Group webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#group-webhooks): You can configure a group webhook, which is triggered by events that occur across all projects in the group and its subgroups.\n1. [Service Level Agreement countdown timer](https://docs.gitlab.com/ee/operations/incident_management/incidents.html#service-level-agreement-countdown-timer): You can enable the SLA timer on incidents to track the SLAs you hold with your customers.\n1. [Lock project membership to group](https://docs.gitlab.com/ee/user/group/access_and_permissions.html#prevent-members-from-being-added-to-projects-in-a-group): As a group Owner, you can prevent any new project membership for all projects in a group, allowing tighter control over project membership.\n1. [Users and permissions report](https://docs.gitlab.com/ee/administration/admin_area.html#user-permission-export): An administrator can export user permissions for all users in the GitLab instance from the Admin Area's Users page.\n1. [Advanced search](https://docs.gitlab.com/ee/user/search/advanced_search.html): You can use advanced search for faster, more efficient search across the entire GitLab instance.\n1. [Group DevOps Adoption](https://docs.gitlab.com/ee/user/group/devops_adoption/index.html): DevOps Adoption shows you how groups in your organization adopt and use the most essential features of GitLab.\n1. [Cross-project pipelines with artifacts dependencies](https://docs.gitlab.com/ee/ci/yaml/index.html#needsproject): Use `needs:project` to download artifacts from up to five jobs in other pipelines.\n1. 
[Feature flag related issues](https://docs.gitlab.com/ee/operations/feature_flags.html#feature-flag-related-issues): You can link related issues to a feature flag.\n1. [Merged results pipelines](https://docs.gitlab.com/ee/ci/pipelines/merged_results_pipelines.html): A merged results pipeline is a type of [merge request pipeline](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html). It is a pipeline that runs against the results of the source and target branches merged together.\n1. [GitLab CI/CD for external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html): GitLab CI/CD can be used with [GitHub](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html), [Bitbucket Cloud](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/bitbucket_integration.html), or any other Git server, though there are some [limitations](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html#limitations).\n1. [Using GitLab CI/CD with a GitHub repository](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/github_integration.html): GitLab CI/CD can be used with GitHub.com and GitHub Enterprise by creating a [CI/CD project](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/index.html) to connect your GitHub repository to GitLab.\n\nThe above 16 features join the eight features already available to the registration tier in GitLab 16.0 and [prior releases](https://about.gitlab.com/blog/expanded-registration-features-program/).\n\n## How to participate in the Registration Features program\n\nIf you are interested in participating as a free self-managed user running GitLab Enterprise Edition, you can learn how from our documentation [how to turn on Service 
Ping](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#enable-or-disable-usage-statistics).",[9,695,693,1062],{"slug":4600,"featured":6,"template":700},"registration-features-program-expands-by-16-free-features","content:en-us:blog:registration-features-program-expands-by-16-free-features.yml","Registration Features Program Expands By 16 Free Features","en-us/blog/registration-features-program-expands-by-16-free-features.yml","en-us/blog/registration-features-program-expands-by-16-free-features",{"_path":4606,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4607,"content":4612,"config":4617,"_id":4619,"_type":14,"title":4620,"_source":16,"_file":4621,"_stem":4622,"_extension":19},"/en-us/blog/removing-tags-from-small-saas-runner-on-linux",{"title":4608,"description":4609,"ogTitle":4608,"ogDescription":4609,"noIndex":6,"ogImage":3806,"ogUrl":4610,"ogSiteName":685,"ogType":686,"canonicalUrls":4610,"schema":4611},"Removing tags from our small SaaS runner on Linux","With GitLab 17.0, we are removing most tags from our small SaaS runner on Linux. 
Find out if you are affected and the change you need to make.","https://about.gitlab.com/blog/removing-tags-from-small-saas-runner-on-linux","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Removing tags from our small SaaS runner on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-08-15\",\n      }",{"title":4608,"description":4609,"authors":4613,"heroImage":3806,"date":4614,"body":4615,"category":1062,"tags":4616},[2192],"2023-08-15","In GitLab 17.0, we are updating the tags of our [small SaaS runner on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html) to\nbe consistent with our other Linux runners.\n\n\n## Who will be affected?\n\nIf you are using the small SaaS runner on Linux with any tag other than\n`saas-linux-small-amd64`, you will be affected as all other tags such as\n`docker` or `linux` will be deprecated. 
Job configurations that use a\ndeprecated tag will become stuck.\n\n\nAn example job configuration that will be stuck could look like this:\n\n\n```yaml\n\ntest-invalid-tag:\n  stage: test\n  tags:\n  - docker\n  - linux\n  script:\n    - echo \"I'm affected and will be stuck after 17.0\"\n```\n\n\n![Stuck\nJob](https://about.gitlab.com/images/blogimages/2023-08-02-removing-tags-from-our-small-saas-runner-on-linux/stuck-job.png)\n\n\nThe small SaaS runner on Linux is configured to run untagged jobs; this\nremains unchanged.\n\nSo, if you're using the small Linux runner but haven't specified a tag, the\nbehavior of your job will not change.\n\n\n## How to avoid jobs getting stuck\n\n\nTo avoid jobs getting stuck after the 17.0 release, you should change the\ntag in your `.gitlab-ci.yml` file to `saas-linux-small-amd64`.\n\n\nAn example job configuration that will work:\n\n\n```yaml\n\ntest-correct-tag:\n  stage: test\n  tags:\n  - saas-linux-small-amd64\n  script:\n    - echo \"I'm running as expected\"\n```\n\n\nAnother example that will work is to define no tag, so the runner will pick\nup an untagged job:\n\n\n```yaml\n\ntest-untagged:\n  stage: test\n  script:\n    - echo \"I'm running as expected\"\n```\n\n\n## References\n\n\n- [What are SaaS runners?](https://docs.gitlab.com/ee/ci/runners/)\n\n- [SaaS runners on Linux\ndocumentation](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\n\n- [Tags - '.gitlab-ci.yml' Keyword\nReference](https://docs.gitlab.com/ee/ci/yaml/#tags)\n",[9,549,693],{"slug":4618,"featured":6,"template":700},"removing-tags-from-small-saas-runner-on-linux","content:en-us:blog:removing-tags-from-small-saas-runner-on-linux.yml","Removing Tags From Small Saas Runner On 
Linux","en-us/blog/removing-tags-from-small-saas-runner-on-linux.yml","en-us/blog/removing-tags-from-small-saas-runner-on-linux",{"_path":4624,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4625,"content":4631,"config":4637,"_id":4639,"_type":14,"title":4640,"_source":16,"_file":4641,"_stem":4642,"_extension":19},"/en-us/blog/revisiting-the-variables-management-workflow",{"title":4626,"description":4627,"ogTitle":4626,"ogDescription":4627,"noIndex":6,"ogImage":4628,"ogUrl":4629,"ogSiteName":685,"ogType":686,"canonicalUrls":4629,"schema":4630},"Revisiting the variables management workflow","Our users helped us identify the hurdles in the variables management experience and we used those insights to guide improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098484/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_618473457_nd5Dr8kfGdrlTWLOPmDjb_1750098483284.jpg","https://about.gitlab.com/blog/revisiting-the-variables-management-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Revisiting the variables management workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2024-02-26\",\n      }",{"title":4626,"description":4627,"authors":4632,"heroImage":4628,"date":4633,"body":4634,"category":718,"tags":4635},[1915],"2024-02-26","CI/CD variables play a vital role in building and maintaining CI/CD pipelines and platforms. They are an essential part of the majority of developers’ workflows, serving a range of purposes from storing reusable information to maintaining data integrity. 
Given their significance, we made enhancing workflows related to CI/CD variables a priority.\nRecently, we conducted interviews with users representing different [personas](https://handbook.gitlab.com/handbook/product/personas/#list-of-user-personas) related to software development, working in teams with different structural and cultural dynamics. Our aim was to gain insights into the challenges they encounter when using and managing CI/CD variables within GitLab. The feedback helped us gain valuable perspective, guiding us toward [necessary improvements](https://gitlab.com/gitlab-org/gitlab/-/issues/418331) in these workflows. Some of the notable changes are highlighted in this blog.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gdL2cEp3kw0?si=aNmhofDU3DsnofiP\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Better management\n\n![variables management - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098504762.png)\n\nEffective decision-making regarding the addition, modification, or removal of CI/CD variables hinges on understanding their purpose within a project or group. Lacking visibility into a variable's purpose can complicate these decisions. To address this challenge, we've introduced an enhancement to the variable creation process that will allow users to provide a description detailing the usage and context of a variable, reducing reliance on memory. This description will be displayed in the list, along with the other attributes of the variable. 
\n\n## Seamless task continuity\n\n![variables management - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098504763.png)\n\nEfficiency is paramount in software development as it allows developers to make time to focus on qualitative aspects of their work. We have changed the variable creation workflow to facilitate consecutive addition or editing of multiple variables to boost efficiency. Improved, clear notifications and contextual error messages ensure users can perform tasks without the need to repeatedly open separate forms.\n\n## Enhanced error prevention\n\n![variables management - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098505/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098504764.png)\n\nHow the error messages are presented and made accessible in a workflow determines their effectiveness in error resolution. We revisited the different error states users are likely to encounter during variable creation and editing workflow and identified the improvement opportunities ranging from adding new validations and help-texts to enhancing existing error-handling states.\n\n## Share your feedback\nWe believe in taking an iterative approach to better the product. We used insights from the recent user research and our best judgment when deciding on the changes, but there’s always room for improvement. Your feedback from your experience of using the changed UI for performing the tasks in your everyday work will help us understand what’s working and what isn’t, and, therefore, decide on future iterations. 
Please head to our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/441177) to share your thoughts and suggestions on the changes made.\n\n## What’s next?\nAs we work on making the existing variables workflow more usable, we’re also making progress on the [GitLab Secret Manager](https://about.gitlab.com/direction/govern/pipeline_security/secrets_management/#overview) to provide users with a more secure method for enabling GitLab, or a component built within GitLab, to connect to other systems.\n\nThere’s an ongoing effort to [improve the variables table layout to clearly represent the visual hierarchy](https://gitlab.com/gitlab-org/gitlab/-/issues/403176) between group and project variables and enhancing the [audit history for CI variables](https://gitlab.com/gitlab-org/gitlab/-/issues/416148) to provide better visibility into activities related to variables.\n\n## Read more about our UI improvements\n- [How we overhauled GitLab navigation](https://about.gitlab.com/blog/navigation-research-blog-post/)\n- [Beautifying our UI: Giving GitLab build features a fresh look](https://about.gitlab.com/blog/beautifying-of-our-ui/)\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[9,4636,696,721],"UX",{"slug":4638,"featured":91,"template":700},"revisiting-the-variables-management-workflow","content:en-us:blog:revisiting-the-variables-management-workflow.yml","Revisiting The Variables Management Workflow","en-us/blog/revisiting-the-variables-management-workflow.yml","en-us/blog/revisiting-the-variables-management-workflow",{"_path":4644,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4645,"content":4651,"config":4656,"_id":4658,"_type":14,"title":4659,"_source":16,"_file":4660,"_stem":4661,"_extension":19},"/en-us/blog/safe-deploys",{"title":4646,"description":4647,"ogTitle":4646,"ogDescription":4647,"noIndex":6,"ogImage":4648,"ogUrl":4649,"ogSiteName":685,"ogType":686,"canonicalUrls":4649,"schema":4650},"GitLab's guide to safe deployment practices","It's important to safeguard your deployment process. Here's our best advice to protect your environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678412/Blog/Hero%20Images/safe_deploy.jpg","https://about.gitlab.com/blog/safe-deploys","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's guide to safe deployment practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-07-23\",\n      }",{"title":4646,"description":4647,"authors":4652,"heroImage":4648,"date":4653,"body":4654,"category":718,"tags":4655},[892],"2020-07-23","\nHere at GitLab we understand the importance of safe deployment practices. \n\n[Progressive delivery](/direction/ops/#progressive-delivery) is continuous delivery with fine-grained control over who sees the change. This ensures that all code and configuration updates go through the [CI/CD stages](/topics/ci-cd/) to catch any regressions or bugs before they reach customers. 
If something does make it past those gates, progressive delivery makes sure any negative impact is as small as possible.\n\nWe have recently added several features that add safeguards to your deployment process, which we will review in this blog post.\n\n### Protected Environments\n\nIt is important that deploy jobs are restricted to only those who are authorized to deploy in that environment, and we call this restriction by roles \"protected\". While this feature has been around for a while, it is important to remember that this should be the first step to take when thinking about safe deployments. \n\nTake a deeper dive into [protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n### Sequential Deployment (or Safe Continuous Deployment)\n\nIf your project follows the Continuous Deployment practice that deploys the `master` branch to the production environment with GitLab CI/CD pipelines, you may encounter the following problems due to the asynchronous nature of pipeline jobs:\n\n- Multiple deployment jobs run concurrently, targeting the same environment. This can make the environment unstable because the deployment script could conflict and finish in an incomplete state.\n- An older deployment job could overwrite the latest deployment, resulting in an unintentional rollback. 
Some users could be exposed to old feature sets on the production website even though the pipeline shows that the latest deployment job successfully finished.\n- A pipeline might deploy to production at the worst time, such as on a holiday or over the weekend, when there is limited staff available to solve potential problems.\n\nTo address these problems, GitLab provides the following options:\n\n* [Limit job concurrency](https://docs.gitlab.com/ee/ci/yaml/#resource_group)\n* [Prevent deployment of old versions](https://docs.gitlab.com/ee/ci/pipelines/settings.html#skip-outdated-deployment-jobs)\n* [Deploy freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze)\n\n## Limit job concurrency\n\nYou can limit deployment concurrency by adding a `resource_group` to any `.gitlab-ci.yml` jobs that should run one at a time. For example:\n\n* Pipeline-A starts running with SHA-A\n* Pipeline-B starts running with SHA-B (newer)\n* Pipeline-A starts a deployment\n* Pipeline-B waits for Pipeline-A's deployment to finish\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/m6eZb6U-M2A\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Prevent deployment of old versions\n\nThe execution order of pipeline jobs can vary from run to run, which could cause undesired behavior. For example, a deployment job in a newer pipeline could finish before a deployment job in an older pipeline. 
This creates a race condition where the older deployment finishes later, overwriting the \"newer\" deployment.\n\nYou can ensure that older deployment jobs are cancelled automatically when a newer deployment runs by enabling the [prevent deployment of old versions](https://docs.gitlab.com/ee/ci/pipelines/settings.html#skip-outdated-deployment-jobs) feature.\n\n* Pipeline-A starts running with SHA-A\n* Pipeline-B starts running with SHA-B (newer)\n* Pipeline-B finishes. Now SHA-B is on the production environment\n* Pipeline-A is canceled automatically because it was going to deploy SHA-A to production\n\n![Prevent deployment of old versions](https://about.gitlab.com/images/blogimages/older_job.png){: .shadow}\n\n## Deployment Freeze\n\nTo prevent deployments for a particular period, such as during a planned holiday when most employees are out, you can set up a deploy freeze. During a deploy freeze, no deployments can be executed. This is helpful to ensure that deployments do not happen unexpectedly.\n\nFind more detailed information about [deployment safety](https://docs.gitlab.com/ee/ci/environments/deployment_safety.html).\n\n**Read more about GitLab and safety:**\n\n* [Capitalize on GitLab security tools](https://docs.gitlab.com/ee/integration/jenkins.html)\n\n* How app sec engineers [can use GitLab to improve security](/blog/secure-stage-for-appsec/)\n\n* Wondering [how secure GitLab is?](/blog/soc2-compliance/)\n\nCover image by [Mathew Schwartz](https://unsplash.com/photos/qcpwU_oMyu8) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,2097,697],{"slug":4657,"featured":6,"template":700},"safe-deploys","content:en-us:blog:safe-deploys.yml","Safe 
Deploys","en-us/blog/safe-deploys.yml","en-us/blog/safe-deploys",{"_path":4663,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4664,"content":4669,"config":4676,"_id":4678,"_type":14,"title":4679,"_source":16,"_file":4680,"_stem":4681,"_extension":19},"/en-us/blog/seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale",{"title":4665,"description":4666,"ogTitle":4665,"ogDescription":4666,"noIndex":6,"ogImage":1117,"ogUrl":4667,"ogSiteName":685,"ogType":686,"canonicalUrls":4667,"schema":4668},"Seamlessly migrate from Jira to GitLab with Jira2Lab at scale","Discover how Jira2GitLab simplifies large-scale Jira-to-GitLab migrations by handling complex data transfers, improving scalability, and ensuring efficient integration.","https://about.gitlab.com/blog/seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Seamlessly migrate from Jira to GitLab with Jira2Lab at scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Maximilien Belinga\"}],\n        \"datePublished\": \"2024-10-10\",\n      }",{"title":4665,"description":4666,"authors":4670,"heroImage":1117,"date":4672,"body":4673,"category":4674,"tags":4675},[4671],"Maximilien Belinga","2024-10-10","[Atlassian Server reached end of life in February](https://about.gitlab.com/move-to-gitlab-from-atlassian/), prompting many customers to explore alternatives like Atlassian Cloud or Data Center. However, enterprises using Atlassian Server are increasingly seeking Agile planning solutions that offer more flexibility, cost-efficiency, and robust DevSecOps integration. They also need to tackle challenges related to data volume, customization, user mapping, performance, and data integrity during migration. 
This is where [GitLab’s Jira2Lab](https://gitlab.com/gitlab-org/professional-services-automation/tools/migration/jira2lab) comes into play, offering a seamless solution for large-scale Jira migrations to GitLab, while providing full CI/CD integration.\n\n## The problem with large-scale Jira migrations\n\nMigrating from Jira to GitLab can be a significant hurdle, especially for enterprises with complex workflows and thousands of issues to move. Here are the most common challenges faced during such migrations:\n\n- **Massive data migration:** As the number of issues, attachments, comments, and projects increases, so does the complexity of migrating them without performance issues or data loss.\n\n- **Custom fields and workflows:** Jira instances often contain custom workflows, fields, and issue types that do not have a one-to-one mapping in GitLab. This gap creates friction during migration, as existing tools often require manual intervention to translate these elements.\n\n- **Lack of full DevSecOps integration:** While many migration tools handle project management data, they do not integrate GitLab’s full DevSecOps capabilities. As a result, teams are left to manually configure their [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipelines and source control management systems after the migration.\n\n## Introducing Jira2Lab\n\nJira2Lab was designed from the ground up to solve the specific challenges of migrating from Jira to GitLab at scale. It’s not just about moving data; it’s about enabling teams to seamlessly transition into GitLab’s powerful DevSecOps environment without downtime or data loss.\n\n### Key features of Jira2Lab\n\n1. Efficient data handling at scale\u003Cbr> \nJira2Lab is optimized to handle thousands of issues, attachments, comments, and custom fields across multiple projects without sacrificing performance. It scales effortlessly to accommodate even the largest enterprise migrations.\n\n2. 
Custom workflow and field mapping\u003Cbr>\nOne of the standout features of Jira2Lab is its ability to automatically map custom workflows and fields from Jira to GitLab. The tool provides a flexible mapping configuration that eliminates the need for manual intervention during the migration process, making sure everything moves smoothly from Jira to GitLab.\n\n3. CI/CD pipeline integration\u003Cbr>\nJira2Lab doesn’t just migrate your issues and projects — it integrates GitLab’s full CI/CD pipeline into the migration process. This ensures that development teams can start using GitLab’s DevSecOps features, such as automated testing and deployment pipelines, immediately after migration.\n\n4. Pilot migrations\u003Cbr>\nOur tool supports pilot migrations to allow teams to test their configurations and workflows before scaling up. This ensures that any issues can be caught early, preventing disruptions during the full migration.\n\n5. Real-time monitoring\u003Cbr>\nThe tool provides real-time monitoring and logs during migration, giving complete transparency to ensure every step is performed correctly and without errors.\n\n6. Customizable and flexible\u003Cbr>\nEven if your Jira instance has unique configurations or workflows, Jira2Lab offers the flexibility to customize the migration according to your specific requirements, ensuring nothing is lost in translation.\n\n### Feature comparison: Jira vs. GitLab\n\nMigrating from Jira to GitLab helps consolidate workflows and unlock advanced features native to GitLab. 
Here’s a quick comparison of the core features of both platforms:\n\n| **Feature**             | **Jira**                        | **GitLab**                    |\n|-------------------------|----------------------------------|-------------------------------|\n| **Issue Tracking**       | Yes (Highly customizable)       | Yes (Integrated with DevSecOps)   |\n| **Agile Boards**         | Yes (Kanban, Scrum)             | Yes (Issue Boards, Milestones) |\n| **CI/CD**                | No (Requires external tools)    | Yes (Built-in CI/CD)           |\n| **Source Control**       | No (Requires GitHub/Bitbucket)  | Yes (Native Git support)       |\n| **DevSecOps Tools**         | Limited integrations            | Full DevSecOps lifecycle          |\n\nWith Jira2Lab, we ensure that all critical aspects, from issue tracking to CI/CD pipelines, are transitioned smoothly, taking full advantage of GitLab’s integrated approach to development and operations.\n\n## The migration methodology\n\nJira2Lab follows a structured, five-phase migration methodology, ensuring seamless transition with minimal disruption:\n\n### 1. Discovery and planning\n\nWe start by thoroughly understanding the customer’s Jira setup, identifying all necessary custom workflows, fields, and projects that need to be migrated. This phase also involves a gap analysis to compare Jira and GitLab features and map out the migration process.\n\n### 2. Setup\nIn this phase, we configure the migration tool and set up the necessary environments for both Jira and GitLab. This includes verifying all permissions and setting up a backup of Jira data before the migration begins.\n\n### 3. Pilot migrations\nBefore migrating the entire dataset, we run pilot migrations on selected projects to test the migration process, workflows, and data integrity. This allows us to identify and resolve any issues early in the process.\n\n### 4. 
Scaled migrations\nAfter validating the pilot migration, we scale the migration across all projects, ensuring minimal downtime and smooth transitions for development teams.\n\n### 5. Wrap-up and post-migration support\nOnce the migration is complete, we provide ongoing support, ensuring all teams are fully operational in GitLab. This phase also includes user training and the decommissioning of the Jira instance, if required.\n\n## Case study: Tackling scale with Jira2Lab\n\nIn a recent migration, a large enterprise faced the challenge of migrating over 20,000 issues across 50 projects from Jira to GitLab. The project had highly customized workflows and thousands of comments and attachments that needed to be transferred.\n\nWith Jira2Lab, we were able to:\n\n- Migrate all data, including custom fields, without any data loss.\n- Set up CI/CD pipelines within GitLab so that teams could immediately continue their work post-migration.\n- Conduct a pilot migration of two projects, which allowed us to identify and fix minor workflow discrepancies before scaling up to the entire organization.\n\nThe result was a seamless transition to GitLab, with the entire process completed within the planned timeline and no significant downtime.\n\n## Get started with Jira2Lab today\n\nJira2Lab stands out in the market by addressing the limitations that other migration tools cannot handle. It is designed specifically for large-scale migrations and can integrate with GitLab’s full DevSecOps lifecycle, unlike most tools that only handle project management data. The tool’s ability to map custom workflows and integrate CI/CD pipelines makes it the perfect solution for enterprises looking to enhance their development workflows while migrating to GitLab.\n\n> Ready to scale your development processes with GitLab? Explore our [Professional Services catalog](https://about.gitlab.com/services/catalog/) to learn how we can help your team migrate efficiently and effectively. 
Contact us through the form at the end for a personalized demo of GitLab's Jira2Lab.\n","agile-planning",[999,9,696,695,693],{"slug":4677,"featured":91,"template":700},"seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale","content:en-us:blog:seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale.yml","Seamlessly Migrate From Jira To Gitlab With Jira2lab At Scale","en-us/blog/seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale.yml","en-us/blog/seamlessly-migrate-from-jira-to-gitlab-with-jira2lab-at-scale",{"_path":4683,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4684,"content":4690,"config":4695,"_id":4697,"_type":14,"title":4698,"_source":16,"_file":4699,"_stem":4700,"_extension":19},"/en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"title":4685,"description":4686,"ogTitle":4685,"ogDescription":4686,"noIndex":6,"ogImage":4687,"ogUrl":4688,"ogSiteName":685,"ogType":686,"canonicalUrls":4688,"schema":4689},"Secure and publish Python packages: A guide to CI integration","Learn how to implement a secure CI/CD pipeline across five stages with the GitLab DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662080/Blog/Hero%20Images/AdobeStock_1097303277.jpg","https://about.gitlab.com/blog/secure-and-publish-python-packages-a-guide-to-ci-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure and publish Python packages: A guide to CI integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-01-21\",\n      }",{"title":4685,"description":4686,"authors":4691,"heroImage":4687,"date":4692,"body":4693,"category":697,"tags":4694},[1122],"2025-01-21","Supply chain security is a critical concern in software development.\nOrganizations need to verify the authenticity and integrity of their\nsoftware packages. 
This guide will show you how to implement a secure CI/CD\npipeline for Python packages using GitLab CI, incorporating package signing\nand attestation using Sigstore's Cosign.\n\n\nYou'll learn:\n\n\n- [Why sign and attest your Python\npackages?](#why-sign-and-attest-your-python-packages%3F)\n\n- [Pipeline overview](#pipeline-overview)\n\n- [Complete pipeline implementation: Setting up the\nenvironment](#complete-pipeline-implementation-setting-up-the-environment)\n   * [Environment configuration](#environment-configuration)\n   * [Configuration breakdown](#configuration-breakdown)\n-  The 6 stages\n\n    1. [Building](#building-crafting-the-package)\n    2. [Signing](#signing-the-digital-notarization)\n    3. [Verification](#verification-the-security-checkpoint)\n    4. [Publishing](#publishing-the-controlled-release)\n    5. [Publishing signatures](#publishing-signatures-making-verification-possible)\n    6. [Consumer verification](#consumer-verification-testing-the-user-experience)\n\n## Why sign and attest your Python packages?\n\n\nHere are four reasons to sign and attest your Python packages:\n\n\n* **Supply chain security:** Package signing ensures that the code hasn't\nbeen tampered with between build and deployment, protecting against supply\nchain attacks.\n\n* **Compliance requirements:** Many organizations, especially in regulated\nindustries, require cryptographic signatures and provenance information for\nall deployed software.\n\n* **Traceability:** Attestations provide a verifiable record of build\nconditions, including who built the package and under what circumstances.\n\n* **Trust verification:** Consumers of your package can cryptographically\nverify its authenticity before installation.\n\n\n## Pipeline overview\n\n\nEnsuring your code's integrity and authenticity is necessary. 
Imagine a\npipeline that doesn't just compile your code but creates a cryptographically\nverifiable narrative of how, when, and by whom your package was created.\nEach stage acts as a guardian, checking and documenting the package's\nprovenance.\n\n\nHere are six stages of a GitLab pipeline that ensure your package is secure\nand trustworthy:\n\n\n* Build: Creates a clean, standard package that can be easily shared and\ninstalled.\n\n* Signing: Adds a digital signature that proves the package hasn't been\ntampered with since it was created.\n\n* Verification: Double-checks that the signature is valid and the package\nmeets all our security requirements.\n\n* Publishing: Uploads the verified package to GitLab's package registry,\nmaking it available for others to use.\n\n* Publishing Signatures: Makes signatures available for verification.\n\n* Consumer Verification: Simulates how end users can verify package\nauthenticity.\n\n\n## Complete pipeline implementation: Setting up the environment\n\n\nBefore we build our package, we need to set up a consistent and secure build\nenvironment. This configuration ensures every package is created with the\nsame tools, settings, and security checks.\n\n\n### Environment configuration\n\n\nOur pipeline requires specific tools and settings to work correctly.\n\n\nPrimary configurations:\n\n\n* Python 3.10 for consistent builds\n\n* Cosign 2.2.3 for package signing\n\n* GitLab package registry integration\n\n* Hardcoded package version for reproducibility\n\n\n**Note about versioning:** We've chosen to use a hardcoded version\n(`\"1.0.0\"`) in this example rather than deriving it from git tags or\ncommits. This approach ensures complete reproducibility and makes the\npipeline behavior more predictable. 
In a production environment, you might\nwant to use semantic versioning based on git tags or another versioning\nstrategy that fits your release process.\n\n\nTool requirements:\n\n\n* Basic utilities: `curl`, `wget`\n\n* Cosign for cryptographic signing\n\n* Python packaging tools: `build`, `twine`, `setuptools`, `wheel`\n\n\n### Configuration breakdown\n\n\n```yaml\n\nvariables:\n  PYTHON_VERSION: '3.10'\n  PACKAGE_NAME: ${CI_PROJECT_NAME}\n  PACKAGE_VERSION: \"1.0.0\"\n  FULCIO_URL: 'https://fulcio.sigstore.dev'\n  REKOR_URL: 'https://rekor.sigstore.dev'\n  CERTIFICATE_IDENTITY: 'https://gitlab.com/${CI_PROJECT_PATH}//.gitlab-ci.yml@refs/heads/${CI_DEFAULT_BRANCH}'\n  CERTIFICATE_OIDC_ISSUER: 'https://gitlab.com'\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.pip-cache\"\n  COSIGN_YES: \"true\"\n  GENERIC_PACKAGE_BASE_URL: \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}\"\n```\n\n\nWe use caching to speed up subsequent builds:\n\n\n```yaml\n\ncache:\n  paths:\n    - ${PIP_CACHE_DIR}\n```\n\n\n## Building: Crafting the package\n\n\nEvery software journey begins with creation. 
In our pipeline, the build\nstage is where raw code transforms into a distributable package, ready to\ntravel across different Python environments.\n\n\nThe build process creates two standardized formats:\n\n\n* a wheel package (.whl) for quick, efficient installation\n\n* a source distribution (.tar.gz) that carries the complete code\n\n\nHere's the build stage implementation:\n\n\n```yaml\n\nbuild:\n  extends: .python-job\n  stage: build\n  script:\n    - git init\n    - git config --global init.defaultBranch main\n    - git config --global user.email \"ci@example.com\"\n    - git config --global user.name \"CI\"\n    - git add .\n    - git commit -m \"Initial commit\"\n    - export NORMALIZED_NAME=$(echo \"${CI_PROJECT_NAME}\" | tr '-' '_')\n    - sed -i \"s/name = \\\".*\\\"/name = \\\"${NORMALIZED_NAME}\\\"/\" pyproject.toml\n    - sed -i \"s|\\\"Homepage\\\" = \\\".*\\\"|\\\"Homepage\\\" = \\\"https://gitlab.com/${CI_PROJECT_PATH}\\\"|\" pyproject.toml\n    - python -m build\n  artifacts:\n    paths:\n      - dist/\n      - pyproject.toml\n```\n\n\nLet's break down what this build stage does:\n\n\n1. Initializes a Git repository (`git init`) and configures it with basic\nsettings\n\n2. Normalizes the package name by converting hyphens to underscores, which\nis required for Python packaging\n\n3. Updates the package metadata in `pyproject.toml` to match our project\nsettings\n\n4. Builds both wheel and source distribution packages using `python -m\nbuild`\n\n5. Preserves the built packages and configuration as artifacts for\nsubsequent stages\n\n\n## Signing: The digital notarization\n\n\nIf attestation is the package's biography, signing is its cryptographic seal\nof authenticity. This is where we transform our package from a mere\ncollection of files into a verified, tamper-evident artifact.\n\n\nThe signing stage uses Cosign to apply a digital signature as an unbreakable\nseal. 
This isn't just a stamp — it's a complex cryptographic handshake that\nproves the package's integrity and origin.\n\n\n```yaml\n\nsign:\n  extends: .python+cosign-job\n  stage: sign\n  id_tokens:\n    SIGSTORE_ID_TOKEN:\n      aud: sigstore\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          cosign sign-blob --yes \\\n            --fulcio-url=${FULCIO_URL} \\\n            --rekor-url=${REKOR_URL} \\\n            --oidc-issuer $CI_SERVER_URL \\\n            --identity-token $SIGSTORE_ID_TOKEN \\\n            --output-signature \"dist/${filename}.sig\" \\\n            --output-certificate \"dist/${filename}.crt\" \\\n            \"$file\"\n        fi\n      done\n  artifacts:\n    paths:\n      - dist/\n```\n\n\nThis signing stage performs several crucial operations:\n\n\n1. Obtains an OIDC token from GitLab for authentication with Sigstore\nservices\n\n2. Processes each built package (both wheel and source distribution)\n\n3. Uses Cosign to create a cryptographic signature (`.sig`) for each package\n\n4. Generates a certificate (`.crt`) that proves the signature's authenticity\n\n5. Stores both signatures and certificates alongside the packages as\nartifacts\n\n\n## Verification: The security checkpoint\n\n\nVerification is our final quality control gate. It's not just a check — it's\na security interrogation where every aspect of the package is scrutinized.\n\n\n```yaml\n\nverify:\n  extends: .python+cosign-job\n  stage: verify\n  script:\n    - |\n      failed=0\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          if ! 
cosign verify-blob \\\n            --signature \"dist/${filename}.sig\" \\\n            --certificate \"dist/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\n\nThe verification stage implements several security checks:\n\n\n1. Examines each package file in the `dist` directory\n\n2. Uses Cosign to verify the signature matches the package content\n\n3. Confirms the certificate's identity matches our expected GitLab pipeline\nidentity\n\n4. Validates our trusted OIDC provider issued the certificate\n\n5. Fails the entire pipeline if any verification check fails, ensuring only\nverified packages proceed\n\n\n## Publishing: The controlled release\n\n\nPublishing is where we make our verified packages available through GitLab's\npackage registry. It's a carefully choreographed release that ensures only\nverified, authenticated packages reach their destination.\n\n\n```yaml\n\npublish:\n  extends: .python-job\n  stage: publish\n  script:\n    - |\n      cat \u003C\u003C EOF > ~/.pypirc\n      [distutils]\n      index-servers = gitlab\n      [gitlab]\n      repository = ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi\n      username = gitlab-ci-token\n      password = ${CI_JOB_TOKEN}\n      EOF\n      TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token \\\n        twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi \\\n        dist/*.whl dist/*.tar.gz\n```\n\n\nThe publishing stage handles several important tasks:\n\n\n1. Creates a `.pypirc` configuration file with GitLab package registry\ncredentials\n\n2. Uses the GitLab CI job token for secure authentication\n\n3. Uploads both wheel and source distribution packages to the GitLab PyPI\nregistry\n\n4. 
Makes the packages available for installation via pip\n\n\n## Publishing signatures: Making verification possible\n\n\nAfter publishing the packages, we must make their signatures and\ncertificates available for verification. We store these in GitLab's generic\npackage registry, making them easily accessible to users who want to verify\npackage authenticity.\n\n\n```yaml\n\npublish_signatures:\n  extends: .python+cosign-job\n  stage: publish_signatures\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.sig\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.crt\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n        fi\n      done\n```\n\n\nThe signature publishing stage performs these key operations:\n\n\n1. Processes each built package to find its corresponding signature files\n\n2. Uses the GitLab API to upload the signature (`.sig`) file to the generic\npackage registry\n\n3. Uploads the corresponding certificate (`.crt`) file\n\n4. Makes these verification artifacts available for downstream package\nconsumers\n\n5. Uses the same version and package name to maintain the connection between\npackages and signatures\n\n\n## Consumer verification: Testing the user experience\n\n\nThe final stage simulates how end users will verify your package's\nauthenticity. 
This stage acts as a final check and a practical example of\nthe verification process.\n\n\n```yaml\n\nconsumer_verification:\n  extends: .python+cosign-job\n  stage: consumer_verification\n  script:\n    - |\n      git init\n      git config --global init.defaultBranch main\n      mkdir -p pkg signatures\n\n      pip download --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      pip download --no-binary :all: \\\n          --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      failed=0\n      for file in pkg/*.whl pkg/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          sig_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n          cert_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.sig\" \\\n               \"$sig_url\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.crt\" \\\n               \"$cert_url\"\n\n          if ! cosign verify-blob \\\n            --signature \"signatures/${filename}.sig\" \\\n            --certificate \"signatures/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\n\nThis consumer verification stage simulates the end-user experience by:\n\n\n1. 
Creating a clean environment to test package installation\n\n2. Downloading the published packages from the GitLab PyPI registry\n\n3. Retrieving the corresponding signatures and certificates from the generic\npackage registry\n\n4. Performing the same verification steps that end users would perform\n\n5. Ensuring the entire process works from a consumer's perspective\n\n6. Failing the pipeline if any verification step fails, providing an early\nwarning of any issues\n\n\n## Summary\n\n\nThis comprehensive pipeline provides a secure and reliable way to build,\nsign, and publish Python packages to GitLab's package registry. By following\nthese practices and implementing the suggested security measures, you can\nensure your packages are appropriately verified and safely distributed to\nyour users.\n\n\nThe pipeline combines modern security practices with efficient automation to\ncreate a robust software supply chain. Using Sigstore's Cosign for signing\nand attestation, along with GitLab's built-in security features, you can\nprovide users with trustworthy cryptographically verified packages.\n\n\n> #### Get started on your security journey today with a [free trial\nof GitLab\nUltimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com).\n\n\n## Learn more\n\n- [Documentation: Use Sigstore for keyless signing and\nverification](https://docs.gitlab.com/ee/ci/yaml/signing_examples.html)\n\n- [Streamline security with keyless signing and verification in\nGitLab](https://about.gitlab.com/blog/keyless-signing-with-cosign/)\n\n- [Annotate container images with build provenance using Cosign in 
GitLab\nCI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd/)\n",[697,232,283,695,785,9,495,917,1127],{"slug":4696,"featured":91,"template":700},"secure-and-publish-python-packages-a-guide-to-ci-integration","content:en-us:blog:secure-and-publish-python-packages-a-guide-to-ci-integration.yml","Secure And Publish Python Packages A Guide To Ci Integration","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration.yml","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"_path":4702,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4703,"content":4708,"config":4714,"_id":4716,"_type":14,"title":4717,"_source":16,"_file":4718,"_stem":4719,"_extension":19},"/en-us/blog/security-scan-experience",{"title":4704,"description":4705,"ogTitle":4704,"ogDescription":4705,"noIndex":6,"ogImage":3299,"ogUrl":4706,"ogSiteName":685,"ogType":686,"canonicalUrls":4706,"schema":4707},"My experience interning to work with security scanning at GitLab","Experience with doing a 4 week internship implementing security scans","https://about.gitlab.com/blog/security-scan-experience","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"My experience interning to work with security scanning at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Rosenberg\"}],\n        \"datePublished\": \"2020-08-04\",\n      }",{"title":4704,"description":4705,"authors":4709,"heroImage":3299,"date":4711,"body":4712,"category":978,"tags":4713},[4710],"Eric Rosenberg","2020-08-04","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nMy name is Eric Rosenberg and I am a support engineer at GitLab.  I’ve always had an interest in security and have spent my support career with \na goal in mind to one day work in a security position.  
After speaking with some of our security team members, and directors, I was chosen to\nparticipate in a 4 week security internship to implement security scanners on some selected open source projects that we have hosted on GitLab.\n\nI wanted to explain the details of my internship and share my experience to hopefully help others that may be chosen for similar internships, \nand also spread knowledge about the scanners that GitLab offers.\n\nThe internship that I took part in was to integrate [GitLab Secure](https://docs.gitlab.com/ee/user/application_security/) features into \nopen-source projects that are hosted on GitLab.com to improve those projects, increase awareness of GitLab’s security offerings, and get us \nvaluable feedback to help us improve the product.  Some of the goals to achieve personally were to help others understand how simple it is to \nadd security into their pipelines, build my knowledge of using our scanners and working through any issues along the way, and to help provide \nfeedback not only to the project owners but also back to GitLab.\n\n## What I did\n\nMy first week was mainly prep work in order to find a few projects that I could reach out to, and hopefully work with, the project’s \nmaintainers/owners.  I wanted to explain the internship, what my goals were, and how the security scans could be beneficial.  I also wanted to \nhave a stable testing environment that I could copy the project over to, so that I would not interfere with their project, just in case they \ndid not want to participate and also so I would not make any changes that could potentially cause issues on their end. I wanted to also run \nthrough all of the [security scan types](https://docs.gitlab.com/ee/user/application_security/#security-scanning-tools) on my own projects so \nthat I could become more familiar with what the scans were, how to use them, and how to read their output.  
I decided to focus mainly on \nproject [ASE](https://gitlab.com/ase/ase) as the project owner was happy to have some extra added security, as well as a point of contact for \nquestions on scanning their code.\n\nThe second week I had a project owner, [ASE](https://gitlab.com/ase/ase), reply to my email and was very interested in working with me.  He \nexplained he was busy and may not have a lot of time to communicate with me, however he was happy to have me take lead and add the scans so \nthat I could provide the results back to him.  I was able to copy the project to my test instance, run the scans, provide the results, and in \nthe end submit an MR so that they could implement the scans on their end and use them moving forward.  \n\nThe third week was mainly focussed on the results from the scans and helping provide the answers to many questions the project owner had.  This \nwas expected and greatly appreciated from my point of view as this not only showed me that the project owner had a lot of interest in keeping \nthe project secure, but it challenged me to work with the members of our security team and build my knowledge of what needed to be done so that \nI was able to then relay this information back to the project owner.  I felt that I gained a lot of information and towards the end of the \nweek, I was very comfortable discussing steps to not only use the scanners but to make changes to the code to keep things secure.  Using the \nSAST scanner (Static Application Security Testing) I was able to scan the Ruby code and print out known vulnerabilities within the Security & \nCompliance dashboard within the admin area.  One of the things I found interesting was finding “false negatives” when it came to the \nvulnerability report.  
For example: “Password in URL detected; please remove and revoke it if this is a leak.”  This would cause alarm to \nanyone that views this in their security & compliance dashboard, however after taking a further look, the password that was being displayed was \nonly an example, which caused no issues.  \n\nMy fourth week I wanted to dedicate to wrapping up my internship and providing as much feedback as possible.  I was able to keep notes along \nthe way, as well as one on one meetings with my internship mentor every week.  I felt that the communication was amazing when it was needed.  I \nwas able to reach out over slack anytime and either receive the answers I needed, or I was pointed in the correct location so that I could \ndiscuss with the team.\n\n## Closing Thoughts\n\nSome feedback I would like to add is that the timing, while being sufficient enough to handle what I needed, was not long enough for what I \nwould have hoped for.  I would have wished for more time to work on different projects, with different project owners, in order to provide a \nbetter outcome of variety within the time of my internship.  That being said, since security is still a focus of mine, I am glad that GitLab \nallows me the flexibility to still keep in contact with the project owners I worked with, and I am happy to continue to help them with the \nknowledge I have learned from doing this internship, and I cant wait to learn even more.  \n\nI believe that in the near future, we will be able to provide internships that can open more opportunities for GitLab team members to be \ninvolved with security type positions and raise interests in working in security.  
I know that it is tough to extend a “shadowing” type \ninternship into the security field as there is more sensitive data being dealt with, but hopefully this internship will continue to be offered \nand grow to even higher possibilities.\n\nOverall, I am extremely happy that I was chosen to take part in this internship, and I would hope to work more with the team in the future.  I \nhave learned a lot and I look forward to using this knowledge to help others including team members and project owners.\n",[9,232,697],{"slug":4715,"featured":6,"template":700},"security-scan-experience","content:en-us:blog:security-scan-experience.yml","Security Scan Experience","en-us/blog/security-scan-experience.yml","en-us/blog/security-scan-experience",{"_path":4721,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4722,"content":4728,"config":4735,"_id":4737,"_type":14,"title":4738,"_source":16,"_file":4739,"_stem":4740,"_extension":19},"/en-us/blog/sentry-integration-blog-post",{"title":4723,"description":4724,"ogTitle":4723,"ogDescription":4724,"noIndex":6,"ogImage":4725,"ogUrl":4726,"ogSiteName":685,"ogType":686,"canonicalUrls":4726,"schema":4727},"Sentry's GitLab integration streamlines error remediation","Your code has bugs, my code has bugs, everyone’s code has bugs (probably). 
Let’s fix that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679964/Blog/Hero%20Images/sentry-io-blog.jpg","https://about.gitlab.com/blog/sentry-integration-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline and shorten error remediation with Sentry’s new GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eva Sasson\"}],\n        \"datePublished\": \"2019-01-25\",\n      }",{"title":4729,"description":4724,"authors":4730,"heroImage":4725,"date":4732,"body":4733,"category":783,"tags":4734},"Streamline and shorten error remediation with Sentry’s new GitLab integration",[4731],"Eva Sasson","2019-01-25","\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KUHk1uuXWhA?rel=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nSentry is open source error tracking that gives visibility across your entire stack and provides the details you need to fix bugs, ASAP. 
Because the only thing better than visibility and details is more visibility and details, Sentry improved their [GitLab integration](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) by adding [release](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) and [commit](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#link-repository) tracking as well as [suspect commits](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#after-linking-a-repository).\n\n### Streamline your workflow with issue management and creation\n\nWhen you receive an alert about an error, the last thing you want to do is to jump around 20 different tools trying to find out exactly what happened and where. Developers with both Sentry and GitLab in their application lifecycle benefit from issue management and issue creation to their GitLab accounts directly in the Sentry UI, alleviating some of the hassle of back-and-forth tool toggling.\n\n![GitLab account in Sentry](https://about.gitlab.com/images/blogimages/sentry/gitlab-sentry-integration.png){: .shadow.large.center}\n\nOf course, less tool jumping results in a more streamlined triaging process and shortened time to issue resolution – something that benefits the whole team.\n\n![Creating GitLab issue](https://about.gitlab.com/images/blogimages/sentry/create-gitlab-issue.png){: .shadow.medium.center}\n\nHave a GitLab issue that wasn’t created in Sentry? No problem. 
Existing issues are also easily linked.\n\n![Import GitLab issue](https://about.gitlab.com/images/blogimages/sentry/import-gitlab-issue.png){: .shadow.medium.center}\n\n### Find and fix bugs faster with release and commit tracking\n\nWhy stop at streamlining the triaging process, when we can also make issue resolution more efficient? Sentry’s GitLab integration now utilizes GitLab commits to find and fix bugs faster.\n\nWith the newly added release and commit tracking, an enhanced release overview page uncovers new and resolved issues, files changed, and authors. Developers can also resolve issues via commit messages or merge requests, see suggested assignees for issues, and receive detailed deploy emails.\n\nWant a big flashing arrow that points to an error’s root cause? Sentry’s suspect commits feature exposes the commit that likely introduced an error as well as the developer who wrote the broken code.\n\n![Suspect commits feature](https://about.gitlab.com/images/blogimages/sentry/suspect-commits-feature.png){: .shadow.medium.center}\n\nKeep in mind that this feature is available for Sentry users on “Teams” plans and above.\n{: .note}\n\nCheck out [Sentry’s GitLab integration documentation](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) to get started.\n\n### What’s next?\n\nAgain, why stop there, when we can do even more? GitLab is currently working to bring Sentry into the GitLab interface. Soon, GitLab and Sentry users will see their Sentry errors listed in their GitLab projects. 
Read the documentation on [the integration here](https://docs.gitlab.com/ee/operations/error_tracking.html).\n\n### About the guest author\n\nEva Sasson is a Product Marketer at [Sentry.io](https://sentry.io/welcome/), an open source error-tracking tool that gives developers the contextual information they need to resolve issues quickly, and integrates with the other development tools across the stack.\n",[9,873,721,232,827,697,874,763,875],{"slug":4736,"featured":6,"template":700},"sentry-integration-blog-post","content:en-us:blog:sentry-integration-blog-post.yml","Sentry Integration Blog Post","en-us/blog/sentry-integration-blog-post.yml","en-us/blog/sentry-integration-blog-post",{"_path":4742,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4743,"content":4749,"config":4754,"_id":4756,"_type":14,"title":4757,"_source":16,"_file":4758,"_stem":4759,"_extension":19},"/en-us/blog/setting-up-gitlab-ci-for-android-projects",{"title":4744,"description":4745,"ogTitle":4744,"ogDescription":4745,"noIndex":6,"ogImage":4746,"ogUrl":4747,"ogSiteName":685,"ogType":686,"canonicalUrls":4747,"schema":4748},"Setting up GitLab CI for Android projects","Learn how to set up GitLab CI to ensure your Android app compiles and passes tests.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666699/Blog/Hero%20Images/banner.jpg","https://about.gitlab.com/blog/setting-up-gitlab-ci-for-android-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Setting up GitLab CI for Android projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2018-10-24\",\n      }",{"title":4744,"description":4745,"authors":4750,"heroImage":4746,"date":4751,"body":4752,"category":718,"tags":4753},[803],"2018-10-24","Note: This is a new version of a previously published blog post, updated for\nthe current Android API level (28). 
Thanks Grayson Parrelli for authoring\n[the original post](/blog/setting-up-gitlab-ci-for-android-projects/)!\n\n{: .alert .alert-info}\n\n\nHave you ever accidentally checked on a typo that broke your Android build\nor unknowingly broke an important use case with a new change? Continuous\nintegration is a way for developers to avoid these headaches, allowing you\nto confirm that changes to your app compile, and your tests pass before\nthey're merged in.\n\n\n[GitLab CI/CD](/solutions/continuous-integration/) is a wonderful [continuous\nintegration](/blog/continuous-integration-delivery-and-deployment-with-gitlab/)\nbuilt-in solution, and in this post we'll walk through how to set up a basic\nconfig file (`.gitlab-ci.yml`) to ensure your Android app compiles and\npasses unit and functional tests. We assume that you know the process of\ncreating an Android app, can write and run tests locally, and are familiar\nwith the basics of the GitLab UI.\n\n\n## Our sample project\n\n\nWe'll be working with a real-world open source Android project called\n[Materialistic](https://github.com/hidroh/materialistic) to demonstrate how\neasy it is to get up and running with GitLab CI for Android. Materialistic\ncurrently uses Travis CI with GitHub, but switching over is a breeze. If you\nhaven't seen Materialistic before, it's a fantastic open source Android\nreader for [Hacker News](https://news.ycombinator.com).\n\n\n### Testing\n\n\n[Unit\ntests](https://developer.android.com/training/testing/unit-testing/index.html)\nare the fundamental tests in your app testing strategy, from which you can\nverify that the logic of individual units is correct. They are a fantastic\nway to catch regressions when making changes to your app. They run directly\non the Java Virtual Machine (JVM), so you don't need an actual Android\ndevice to run them.\n\n\nIf you already have working unit tests, you shouldn't have to make any\nadjustments to have them work with GitLab CI. 
Materialistic uses\n[Robolectric](http://robolectric.org/) for tests,\n[Jacoco](https://www.eclemma.org/jacoco/) for coverage, and also has a\nlinting pass. We'll get all of these easily running in our `.gitlab-ci.yml`\nexample except for Jacoco, since that requires a secret token we do not have\n- though I will show you how to configure that in your own projects.\n\n\n## Setting up GitLab CI\n\n\nWe want to be able to configure our project so that our app is built, and it\nhas the complete suite of tests run upon check-in. To do so, we have to\ncreate our GitLab CI configuration file, called `.gitlab-ci.yml`, and place\nit in the root of our project.\n\n\nSo, first things first: If you're just here for a snippet to copy-paste,\nhere is a `.gitlab-ci.yml` that will build and test the Materialistic app:\n\n\n```yml\n\nimage: openjdk:8-jdk\n\n\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n\nstages:\n  - build\n  - test\n\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci 
--console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\n\nWell, that's a lot of code! Let's break it down.\n\n\n### Understanding `.gitlab-ci.yml`\n\n\n#### Defining the Docker Image\n\n{:.special-h4}\n\n\n```yml\n\nimage: openjdk:8-jdk\n\n```\n\n\nThis tells [GitLab Runners](https://docs.gitlab.com/ee/ci/runners/) (the\nthings that are executing our build) what [Docker\nimage](https://hub.docker.com/explore/) to use. If you're not familiar with\n[Docker](https://hub.docker.com/), the TL;DR is that Docker provides a way\nto create a completely isolated version of a virtual operating system\nrunning in its own\n[container](https://www.sdxcentral.com/cloud/containers/definitions/what-is-docker-container-open-source-project/).\nAnything running inside the container thinks it has the whole machine to\nitself, but in reality there can be many containers running on a single\nmachine. Unlike full virtual machines, Docker containers are super fast to\ncreate and destroy, making them great choices for setting up temporary\nenvironments for building and testing.\n\n\nThis [Docker image (`openjdk:8-jdk`)](https://hub.docker.com/_/openjdk/)\nworks perfectly for our use case, as it is just a barebones installation of\nDebian with Java pre-installed. We then run additional commands further down\nin our config to make our image capable of building Android apps.\n\n\n#### Defining variables\n\n\n```yml\n\nvariables:\n  ANDROID_COMPILE_SDK: \"28\"\n  ANDROID_BUILD_TOOLS: \"28.0.2\"\n  ANDROID_SDK_TOOLS:   \"4333796\"\n```\n\n\nThese are variables we'll use throughout our script. They're named to match\nthe properties you would typically specify in your app's `build.gradle`.\n\n\n- `ANDROID_COMPILE_SDK` is the version of Android you're compiling with. 
It\nshould match `compileSdkVersion`.\n\n- `ANDROID_BUILD_TOOLS` is the version of the Android build tools you are\nusing. It should match `buildToolsVersion`.\n\n- `ANDROID_SDK_TOOLS` is a little funny. It's what version of the command\nline tools we're going to download from the [official\nsite](https://developer.android.com/studio/index.html). So, that number\nreally just comes from the latest version available there.\n\n\n#### Installing packages\n\n{:.special-h4}\n\n\n```yml\n\nbefore_script:\n  - apt-get --quiet update --yes\n  - apt-get --quiet install --yes wget tar unzip lib32stdc++6 lib32z1\n```\n\n\nThis starts the block of the commands that will be run before each job in\nour config.\n\n\nThese commands ensure that our package repository listings are up to date,\nand it installs packages we'll be using later on, namely: `wget`, `tar`,\n`unzip`, and some packages that are necessary to allow 64-bit machines to\nrun Android's 32-bit tools.\n\n\n#### Installing the Android SDK\n\n\n```yml\n  - wget --quiet --output-document=android-sdk.zip https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_TOOLS}.zip\n  - unzip -d android-sdk-linux android-sdk.zip\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platforms;android-${ANDROID_COMPILE_SDK}\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"platform-tools\" >/dev/null\n  - echo y | android-sdk-linux/tools/bin/sdkmanager \"build-tools;${ANDROID_BUILD_TOOLS}\" >/dev/null\n```\n\n\nHere we're downloading the Android SDK tools from their official location,\nusing our `ANDROID_SDK_TOOLS` variable to specify the version. 
Afterwards,\nwe're unzipping the tools and running a series of `sdkmanager` commands to\ninstall the necessary Android SDK packages that will allow our app to build.\n\n\n#### Setting up the environment\n\n\n```yml\n  - export ANDROID_HOME=$PWD/android-sdk-linux\n  - export PATH=$PATH:$PWD/android-sdk-linux/platform-tools/\n  - chmod +x ./gradlew\n  # temporarily disable checking for EPIPE error and use yes to accept all licenses\n  - set +o pipefail\n  - yes | android-sdk-linux/tools/bin/sdkmanager --licenses\n  - set -o pipefail\n```\n\n\nFinally, we wrap up the `before_script` section of our config with a few\nremaining tasks. First, we set the `ANDROID_HOME` environment variable to\nthe SDK location, which is necessary for our app to build. Next, we add the\nplatform tools to our `PATH`, allowing us to use the `adb` command without\nspecifying its full path, which is important when we run a downloaded script\nlater. Next, we ensure that `gradlew` is executable, as sometimes Git will\nmess up permissions.\n\n\nThe next command `yes | android-sdk-linux/tools/bin/sdkmanager --licenses`\nis responsible for accepting the SDK licenses. Because the unix `yes`\ncommand results in an EPIPE error once the pipe is broken (when the\nsdkmanager quits normally), we temporarily wrap the command in `+o pipefile`\nso that it does not terminate script execution when it fails.\n\n\n#### Defining the stages\n\n\n```yml\n\nstages:\n  - build\n  - test\n```\n\n\nHere we're defining the different\n[stages](https://docs.gitlab.com/ee/ci/yaml/#stages) of our build. We can\ncall these anything we want. A stage can be thought of as a group of\n[jobs](https://docs.gitlab.com/ee/ci/jobs/). All of the jobs in the same\nstage happen in parallel, and all jobs in one stage must be completed before\nthe jobs in the subsequent stage begin. We've defined two stages: `build`\nand `test`. 
They do exactly what you think: the `build` stage ensures the\napp compiles, and the `test` stage runs our unit and functional tests.\n\n\n#### Building the app\n\n\n```yml\n\nlintDebug:\n  stage: build\n  script:\n    - ./gradlew -Pci --console=plain :app:lintDebug -PbuildDir=lint\n\nassembleDebug:\n  stage: build\n  script:\n    - ./gradlew assembleDebug\n  artifacts:\n    paths:\n    - app/build/outputs/\n```\n\n\nThis defines our first job, called `build`. It has two parts - a linter to\nensure that the submitted code is up to snuff, and the actual compilation of\nthe code (and configuration of the `artifacts` that GitLab should expect to\nfind). These are run in parallel for maximum efficiency.\n\n\n#### Running tests\n\n\n```yml\n\ndebugTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain :app:testDebug\n```\n\n\nThis defines a job called `debugTests` that runs during the `test` stage.\nNothing too crazy here about setting something simple like this up!\n\n\nIf we had wanted to get Jacoco also working, that would be very\nstraightforward. 
Simply adding a section as follows would work - the only\nadditional thing you'd need to do is add a secret variable containing your\npersonal `COVERALLS_REPO_TOKEN`:\n\n\n```yml\n\ncoverageTests:\n  stage: test\n  script:\n    - ./gradlew -Pci --console=plain jacocoTestReport coveralls\n```\n\n\n## Run your new CI setup\n\n\nAfter you've added your new `.gitlab-ci.yml` file to the root of your\ndirectory, just push your changes to the appropriate branch and off you go!\nYou can see your running builds in the **Pipelines** tab of your project.\nYou can even watch your build execute live and see the runner's output,\nallowing you to debug problems easily.\n\n\n![Pipelines tab\nscreenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-01.png){:.shadow}\n\n\nAfter your build is done, you can retrieve your build artifacts:\n\n\n- First, click on your completed build, then navigate to the Jobs tab:\n\n\n![Build details button\nscreenshot](https://about.gitlab.com/images/blogimages/gitlab-ci-for-android-2018/tutorial-02.png){:.shadow}\n\n\nFrom here, simply click on the download button to download your build\nartifacts.\n\n\n## Conclusion\n\n\nSo, there you have it! 
You now know how to create a GitLab CI config that\nwill ensure your app:\n\n\n- Compiles\n\n- Passes tests\n\n- Allows you to access your build artifacts (like your\n[APK](https://en.wikipedia.org/wiki/Android_application_package))\nafterwards.\n\n\nYou can take a look at my local copy of the Materialistic repository, with\neverything up and running, at [this\nlink](https://gitlab.com/jyavorska/androidblog-2018)\n\n\nEnjoy your newfound app stability :)\n\n\n\u003C!-- closes https://gitlab.com/gitlab-com/www-gitlab-com/issues/3167 -->\n\n\u003C!-- cover image: https://unsplash.com/photos/aso6SYJZGps -->\n\n\n\u003Cstyle>\n  img {\n    display: block;\n    margin: 0 auto 20px auto;\n  }\n  .special-h4 {\n    margin-top: 20px !important;\n  }\n\u003C/style>\n",[9,763],{"slug":4755,"featured":6,"template":700},"setting-up-gitlab-ci-for-android-projects","content:en-us:blog:setting-up-gitlab-ci-for-android-projects.yml","Setting Up Gitlab Ci For Android Projects","en-us/blog/setting-up-gitlab-ci-for-android-projects.yml","en-us/blog/setting-up-gitlab-ci-for-android-projects",{"_path":4761,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4762,"content":4766,"config":4771,"_id":4773,"_type":14,"title":4774,"_source":16,"_file":4775,"_stem":4776,"_extension":19},"/en-us/blog/six-more-months-ci-cd-github",{"title":4763,"description":1599,"ogTitle":4763,"ogDescription":1599,"noIndex":6,"ogImage":1200,"ogUrl":4764,"ogSiteName":685,"ogType":686,"canonicalUrls":4764,"schema":4765},"Extending free use of CI/CD for GitHub on GitLab.com","https://about.gitlab.com/blog/six-more-months-ci-cd-github","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Extending free use of CI/CD for GitHub on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-03-21\",\n      
}",{"title":4763,"description":1599,"authors":4767,"heroImage":1200,"date":4768,"body":4769,"category":300,"tags":4770},[1445],"2019-03-21","\n\nUPDATE: We've [extended again until Mar. 22, 2020](/blog/ci-cd-github-extended-again/)\n\n[CI/CD is one of the best parts of GitLab](/topics/ci-cd/). Our robust feature set and powerful Runner architecture have earned us some strong industry accolades. While we believe using GitLab end to end as a single application is the best experience, we also believe in [playing well with others](/handbook/product/gitlab-the-product/#plays-well-with-others) so that you can use the tools you want without vendor lock-in. In this spirit, we built [CI/CD for external repos](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) and [CI/CD for GitHub](/solutions/github/) to allow you to host your code repositories on GitHub.com, GitHub Enterprise, BitBucket, or any Git server, while using GitLab CI/CD to build, test, and deploy your code.\n\nWe decided to extend the deadline for using CI/CD for external repos, including CI/CD for GitHub, until **Sep. 22, 2019**. You’ll now have an additional six months to enjoy CI/CD for external repos as a [Free or Bronze](/user on GitLab.com. This feature will continue to be part of the [Premium tier](/pricing/premium/) for GitLab Self-managed.\n\n## Always free for open source\n\nThis extension applies to private repos hosted on GitLab.com. As part of our commitment to open source, public projects get [all the features of Gold for free](/pricing/). GitLab CI/CD for GitHub works by automatically mirroring your repos to GitLab.com. 
As such, if you have a public project on GitHub, it will also be public on GitLab so you can always take advantage of GitLab CI/CD for public projects.\n\n## Why we're extending the offer\n\nIn full [transparency](https://handbook.gitlab.com/handbook/values/#transparency), there are a few reasons we decided on an extension.\n\nThe first reason is that we didn’t want to ruin anyone’s day by shutting off functionality without fair warning. We don’t currently have all of the instrumentation in place to give us confidence that we can appropriately notify users, so we'll spend some time in the coming months to build this ability. We want to give ample opportunity for everyone currently enjoying the functionality on GitLab.com Free and Starter to make the choice to upgrade or migrate.\n\nThe second reason is the changing CI/CD market landscape. With recent developments – like the [consolidation of the CI/CD market](/blog/ci-cd-market-consolidation/) and the launch of the [Continuous Delivery Foundation](/blog/gitlab-joins-cd-foundation/) – we’ve seen greater interest in using GitLab CI/CD with other Git hosting options. Extending the timeline will allow more folks to test it out.\n\nFinally, we want to take this time to capture additional feedback on how you use this feature so we can improve it. If you are using GitLab CI/CD with any external Git repository, like GitHub.com, GitHub Enterprise, BitBucket, or even  your own vanilla Git server, we’d love to hear why you keep your code where you do, what you like about GitLab CI/CD, and what we can improve. We have several open channels for feedback so please leave a comment on this post, send us a message on Twitter with the hashtag #GitLabCICD, or log an issue with a bug fix or feature request on our [open issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues). 
We hope you enjoy an extra six months of usage and hope to hear from you soon.\n",[9,721,268],{"slug":4772,"featured":6,"template":700},"six-more-months-ci-cd-github","content:en-us:blog:six-more-months-ci-cd-github.yml","Six More Months Ci Cd Github","en-us/blog/six-more-months-ci-cd-github.yml","en-us/blog/six-more-months-ci-cd-github",{"_path":4778,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4779,"content":4785,"config":4789,"_id":4791,"_type":14,"title":4792,"_source":16,"_file":4793,"_stem":4794,"_extension":19},"/en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd",{"title":4780,"description":4781,"ogTitle":4780,"ogDescription":4781,"noIndex":6,"ogImage":4782,"ogUrl":4783,"ogSiteName":685,"ogType":686,"canonicalUrls":4783,"schema":4784},"How GitLab CI helps solve common DevSecOps challenges","How single application continuous integration helps team automate and collaborate.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681305/Blog/Hero%20Images/ci-use-case-web-header.png","https://about.gitlab.com/blog/solve-devsecops-challenges-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab CI helps solve common DevSecOps challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-05-12\",\n      }",{"title":4780,"description":4781,"authors":4786,"heroImage":4782,"date":2074,"body":4787,"category":1040,"tags":4788},[715],"\n\nCollaboration is an important part of [DevSecOps](/solutions/security-compliance/). Effective collaboration requires visibility, not only into the work being done by other members of the team, but also into the processes that help the team produce that work in the first place. 
It can be hard to gauge bottlenecks, solve problems, fix bugs, or work agilely if everyone is juggling their own set of tools or siloed within their own environments.\n\n\n## DevSecOps challenges\n\nOne of the reasons that we frequently discuss toolchain complexity is that it can hinder development speed in significant ways. [In a survey conducted by Forrester](/resources/whitepaper-forrester-manage-your-toolchain/) of over 250 IT professionals, 45% said they were using three or more tools for software delivery. Of those using three tools or more, **two-thirds were using eleven or more tools per toolchain**. While using multiple tools isn’t a bad thing in itself, it adds layers of complexity to processes that are already pretty complicated.\n\nIntegrated toolchains require regular maintenance. If teams rely on a [plugin environment](/blog/plugin-instability/), there are also dependencies that need to be monitored and updated. For teams using microservices, they may also have to contend with 20 different pipelines, each with hundreds of shell script outputs. Dealing with [brittle pipelines](https://harness.io/2018/09/4-reasons-your-jenkins-pipelines-are-brittle/) is a common challenge, and for those using plugins it can be difficult to assess whether the pipeline itself is broken vs. the actual software artifact or build that’s being tested.\n\nFrom an operations perspective, managing multiple toolchains is time-consuming. When problems or errors arise and need to be sent back to the developer, it becomes difficult to troubleshoot because the code isn’t fresh in their mind (also known as context switching). Instead of focusing on building applications, developers worry about environments. Instead of focusing on infrastructure optimization, operations teams have to put out fires.\n\nDevSecOps teams need to be able to collaborate, and visibility is a key component in helping teams work better together. 
By simplifying the toolchain, it reduces barriers to communication and gives [DevOps access](/topics/devops/) to the entire software development lifecycle (SDLC). When teams can build, test, and deploy with single sign-on simplicity, they can solve problems and share knowledge all in one place.\n\nGitLab’s [complete DevOps platform](/solutions/devops-platform/), delivered as a single application, offers built-in CI/CD so that teams can test and deploy all from one interface. Instead of logging into multiple tools, everyone has access to the same information.\n\n## Benefits of GitLab CI/CD\n\n1. **Eliminate siloes:** A complicated toolchain isolates teams and tools, creating bottlenecks in the development lifecycle. GitLab brings dev, sec, and ops together in one interface.\n2. **Greater visibility:** With full visibility across the entire SDLC, teams can solve problems faster with fewer roadblocks.\n3. **Increased efficiency:** Instead of managing a brittle plugin environment or maintaining multiple tools, teams can focus on more productive tasks.\n4. **Industry-leading CI/CD:** Teams don't have to sacrifice functionality for convenience. 
GitLab's CI/CD offers everything teams need for cloud native application development and was [voted a leader in CI by the Forrester Wave](/analysts/forrester-cloudci19/).\n\nTo learn more about single application CI/CD, download our eBook and see how we compare to other CI tools.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nThe benefits of single application CI/CD eBook - [Read here](/why/use-continuous-integration-to-build-and-test-faster/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[9,875],{"slug":4790,"featured":6,"template":700},"solve-devsecops-challenges-with-gitlab-ci-cd","content:en-us:blog:solve-devsecops-challenges-with-gitlab-ci-cd.yml","Solve Devsecops Challenges With Gitlab Ci Cd","en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd.yml","en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd",{"_path":4796,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4797,"content":4803,"config":4808,"_id":4810,"_type":14,"title":4811,"_source":16,"_file":4812,"_stem":4813,"_extension":19},"/en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab",{"title":4798,"description":4799,"ogTitle":4798,"ogDescription":4799,"noIndex":6,"ogImage":4800,"ogUrl":4801,"ogSiteName":685,"ogType":686,"canonicalUrls":4801,"schema":4802},"Native code intelligence is coming to GitLab","We're enhancing code review with Sourcegraph – no extra plugins required.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673090/Blog/Hero%20Images/random_code.jpg","https://about.gitlab.com/blog/sourcegraph-code-intelligence-integration-for-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Native code intelligence is coming to GitLab\",\n       
 \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-11-12\",\n      }",{"title":4798,"description":4799,"authors":4804,"heroImage":4800,"date":4805,"body":4806,"category":783,"tags":4807},[2858],"2019-11-12","\nAlmost a year ago, our CEO [Sid Sijbrandij](/company/team/#sytses) opened an issue proposing [GitLab integrate with Sourcegraph to provide advanced code navigation and cross-referencing functionality for source code we host](https://gitlab.com/gitlab-org/gitlab/issues/20642). We knew this feature would be a big improvement to the Developer UX in our product, particularly for efficient code review. We also knew [Sourcegraph](https://about.sourcegraph.com/) has an open-core product with one of the best-in-class code navigation capabilities. It only made sense to have a tighter integration between the two products.\n\n## How we built this\n\nSo, our generous friends at Sourcegraph got to work. A [browser extension supporting GitLab](https://docs.sourcegraph.com/integration/gitlab) was already available, but Sourcegraph collaborated with our engineering and product management teams and added the integration directly to the GitLab codebase – powered by GitLab.com and Sourcegraph.com. 
The integration gives users a fully browser-based developer platform, with no extra plugins required.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/LjVxkt4_sEA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\nGitLab CEO and co-founder Sid Sijbrandij and Sourcegraph CEO and co-founder Quinn Slack explain the new integration.\n{: .note.text-center}\n\nFor now, get a sneak preview of how our integration with Sourcegraph works by watching a [quick screencast tutorial](https://vimeo.com/372226334/de668e24fa).\n\nThe process of building the integration between Sourcegraph and GitLab is a great example of our [transparency](https://handbook.gitlab.com/handbook/values/#transparency) and [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) values at work.\n\n## Collaboration in the open\n\n[Sourcegraph’s contribution to GitLab](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) is significant for developer productivity. For example, their merge request (MR) adds native support for features like ‘go-to-definition’ and ‘find references’ within a hover tooltip. Users can engage the tooltip UI in code views, file views, merge requests, and code diffs. Developers can stay in context during code reviews when they need to investigate a function implementation by simply hovering over the name of the function to navigate efficiently. Within the tooltip, users can see the definition of the function, navigate to the definition, or show other references in the code where the function is being used. In addition to making code reviews higher quality and more efficient, developers will have an easier time investigating complex implementations when reading the source of their favorite library. 
With Sourcegraph, we’re enabling developers with a richer UX by gathering more information about the code they are reading.\n\nSee for yourself by reading the discussions on the [MR](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) and viewing changes made to the code. As always, we’re collaborating in the open and encourage the community to provide constructive feedback on our project. Drop a line in the blog comments to share your thoughts.\n\nFor a more detailed overview of the UX of functionality and features, check out [this blog post](https://about.sourcegraph.com/blog/gitlab-integrates-sourcegraph-code-navigation-and-code-intelligence) by Christina Forney, product manager at Sourcegraph.\n\n## What does this mean for our users?\n\nGitlab’s integration with Sourcegraph will be available in our [12.5 release](/upcoming-releases/) on November 22, 2019. We aim to provide code intelligence and code navigation functionality in this integration which was historically provided by the Sourcegraph’s browser extension. Now that we built this integration the browser extension is no longer needed to provide this functionality.\n\nIn the spirit of [iteration](https://handbook.gitlab.com/handbook/values/#iteration) our rollout strategy on GitLab.com is to **first dogfood** the functionality within our [*gitlab-org*](https://gitlab.com/gitlab-com/) group, which is where GitLab stores [source code for GitLab.com](/solutions/source-code-management/) and GitLab Enterprise. Over time, we aim to roll out Sourcegraph capabilities across code views within projects to all *public projects* on GitLab.com. Users will still require the browser extension configured to a private instance of Sourcegraph for **private projects** on GitLab.com.\n\nIf you’re self-managing your GitLab EE deployment and would like to enable Sourcegraph code intelligence, you must have a private Sourcegraph instance running as an external service. 
This is required because Sourcegraph.com does not index any private code for privacy and security reasons. We will have formal documentation on how to get started with GitLab EE and Sourcegraph soon, but if you’re super curious, [you can see our work in progress here](https://gitlab.com/gitlab-org/gitlab/blob/ps-sourcegraph-playground/doc/integration/sourcegraph.md) within the MR branch.\n\n## What’s next?\n\nStay tuned for our 12.5 release announcement on November 22 and updates containing details around our integration with Sourcegraph. Give us a [thumbs up](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) if you like what we’re working on. If you’re new to Sourcegraph and/or GitLab, [sign up here](https://gitlab.com/users/sign_up) and install [the browser extension](https://docs.sourcegraph.com/integration/gitlab#browser-extension) to test out these features right away. [Here is a link to a file in one of our public projects where you can test out these features](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/executors/ssh/executor_ssh.go).\n\n[Cover photo](https://unsplash.com/photos/qjnAnF0jIGk) by [Markus Spiske](https://unsplash.com/@markusspiske) on Unsplash.\n{: .note}\n",[9,830,232],{"slug":4809,"featured":6,"template":700},"sourcegraph-code-intelligence-integration-for-gitlab","content:en-us:blog:sourcegraph-code-intelligence-integration-for-gitlab.yml","Sourcegraph Code Intelligence Integration For 
Gitlab","en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab.yml","en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab",{"_path":4815,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4816,"content":4821,"config":4826,"_id":4828,"_type":14,"title":4829,"_source":16,"_file":4830,"_stem":4831,"_extension":19},"/en-us/blog/speed-secure-software-delivery-devsecops",{"title":4817,"description":4818,"ogTitle":4817,"ogDescription":4818,"noIndex":6,"ogImage":951,"ogUrl":4819,"ogSiteName":685,"ogType":686,"canonicalUrls":4819,"schema":4820},"Speed up secure software delivery with DevSecOps","It’s time to shift left: Embed security into your DevOps workflow to increase speed, quality, and efficiency in the SDLC.","https://about.gitlab.com/blog/speed-secure-software-delivery-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up secure software delivery with DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-04-30\",\n      }",{"title":4817,"description":4818,"authors":4822,"heroImage":951,"date":4823,"body":4824,"category":1040,"tags":4825},[1895],"2019-04-30","\n\nDevOps is a revolutionary step forward in efficient software delivery, but teams\noften face painful delays when releases are put through security testing.\nSecurity is critical for every digital entity, but often adds tension to a\nprocess that is already under pressure for speed and cost efficiency. For many,\nsoftware delivery resembles an assembly-line style of work where employees have\nto constantly stop and start their work on different projects, breaking\ntheir mental flow and straining relationships between teams.\n\nTo illustrate, let’s trade software for [Ford’s Model Ts](https://www.history.com/this-day-in-history/fords-assembly-line-starts-rolling)\nfor a minute. 
Software development closely resembles development of those first cars\nmanufactured by Ford: Each worker makes a contribution and hands off to the\nnext, and then the security pros take it for a test drive (or look for\nvulnerabilities). But if the car doesn’t function properly, it’s sent back to\nthe beginning of the line to the developers who have already begun working on\na different vehicle.\n\nBack to software. How can teams solve this back-and-forth without foregoing\nquality? They must embed security into the development workflow.\n\n## Integrate and automate end-to-end security\n\nWhen security is embedded into the developer workflow, developers can respond\nto vulnerability alerts _while_ they’re writing code. Within the developer's\npipeline report in GitLab, individual vulnerabilities are presented to the developer for\nreview. Alerts could include unsafe code, dangerous attributes, and other\nvulnerabilities that could put your application at risk. The developer is able\nto look into each alert, determine whether it needs to be addressed or can be\ndismissed, and then address each alert while moving through the\ndevelopment process. In the [Security Group Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), the security analyst is able to see which alerts the developer was unable to resolve as well as what\nwas dismissed, making sure no vulnerabilities slip through the cracks.\n\n### Gain speed and efficiency with DevSecOps\n\nEmbedded security checks allow developers to pass off a streamlined workflow to\ntheir security peers. Security then focuses on the most important risks and\nthreats with the typical mountain of checks reduced to a much shorter list.\nShortened test times lead to much faster releases: Wag! 
(a dog-walking app)\n[brought their release time down from 40 minutes to just six.](/blog/wag-labs-blog-post/)\n\nStandard release processes place an unnecessary burden on your teams when a\nlimited number of engineers can work on them and project handoff actually\nimpedes completion. The ability to work concurrently within the same environment\nrepresents much more than a shift left: It redefines the entire DevOps\nlifecycle, enabling greater efficiency and collaboration on a single source\nof truth.\n\n### How it works\n\n[Static application security testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\nbrings vulnerabilities to developers so they can review gaps in their code\n_within_ their own working environment before passing the project off to\nsecurity. This integration mitigates the friction that often stands between dev\nand security, allowing security to graduate from roadblock status to critical\nworkflow component. The collaborative nature of [SAST within tools like GitLab](https://docs.gitlab.com/ee/user/application_security/sast/)\nallows different teams to access the project at any time, eliminating any\ncumbersome linear processes and breaking down silos within the larger\norganization.\n\n## Accelerate delivery and build productivity by testing closer to remediation\n\nShifting left might ring alarm bells for some, but don’t worry – developers\nwon’t be solving _every_ security problem. The idea is to alert your dev team to\nthe code fixes that would be easiest for them to solve, rather than making the\nsecurity team do the digging. 
This switch will streamline the overall workflow,\nallowing the security team to focus on more critical risks and reducing handoff\nbetween security and dev.\n\n[DevSecOps](/topics/devsecops/) integrates security into your CI/CD processes, allowing your teams to\nwork quickly, collaborate efficiently, and produce secure and\nquality software at every release.\n\nAre you ready to build security into your DevOps practices? [Just commit.](https://about.gitlab.com/solutions/security-compliance/)\n{: .alert .alert-gitlab-purple .text-center}\n",[9,721,697],{"slug":4827,"featured":6,"template":700},"speed-secure-software-delivery-devsecops","content:en-us:blog:speed-secure-software-delivery-devsecops.yml","Speed Secure Software Delivery Devsecops","en-us/blog/speed-secure-software-delivery-devsecops.yml","en-us/blog/speed-secure-software-delivery-devsecops",{"_path":4833,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4834,"content":4840,"config":4845,"_id":4847,"_type":14,"title":4848,"_source":16,"_file":4849,"_stem":4850,"_extension":19},"/en-us/blog/strategies-microservices-architecture",{"title":4835,"description":4836,"ogTitle":4835,"ogDescription":4836,"noIndex":6,"ogImage":4837,"ogUrl":4838,"ogSiteName":685,"ogType":686,"canonicalUrls":4838,"schema":4839},"Implementing microservices architectures and deployment strategies","Want to dump the monolith and get into microservices? 
Consider these three methods.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662898/Blog/Hero%20Images/microservices-explosion.jpg","https://about.gitlab.com/blog/strategies-microservices-architecture","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Implementing microservices architectures and deployment strategies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-17\",\n      }",{"title":4835,"description":4836,"authors":4841,"heroImage":4837,"date":4842,"body":4843,"category":1040,"tags":4844},[715],"2019-06-17","\n\nMicroservices can have a major impact on organizations looking to increase automation and deployment speed. The biggest companies in the world – Amazon, Netflix, Google, etc. – all work on this architecture model and release at lightning speed. So why is using microservices so effective? The easiest way to understand [microservices architecture](/blog/what-are-the-benefits-of-a-microservices-architecture/) is by comparing it to its counterpart – the monolith.\n\nWith a monolithic architecture, all of the components are part of a single unit: Everything is developed, deployed, and scaled together. In comparison, [microservices](/topics/microservices/) have each component broken out and deployed individually as services, and these services communicate with each other via API calls. For complex applications that need to run at scale, microservices can offer greater flexibility, reliability, and a faster pace of innovation than monoliths.\n\nNo, monoliths aren’t inherently bad, but teams stuck in a monolith system often sacrifice speed for simplicity, and that could haunt them in the long term. So what do you do when you want to make the switch to microservices and start implementing faster? 
Consider these options.\n\n## The strangler method\n\n[Martin Fowler’s strangler method](https://www.martinfowler.com/bliki/StranglerApplication.html) was inspired by a trip he took to Australia:\n\n> “One of the natural wonders of this area [Australia] is the huge strangler vines. They seed in the upper branches of a fig tree and gradually work their way down the tree until they root in the soil. Over many years they grow into fantastic and beautiful shapes, meanwhile strangling and killing the tree that was their host.”\n\nIt sounds brutal based on this description, but it’s actually one of the gentlest and most effective transitions for an organization. Essentially, parts of the monolith become microservices little by little until eventually the monolith is cut out completely. The benefit is that this transition is much more gradual, so uptime and availability are largely unaffected while the organization modernizes. The con? Speed.\n\n## The Lego strategy\n\nLet’s say you don’t necessarily want to ditch the monolith completely. Maybe it has a valuable use for a certain product or facet of the organization, or maybe you just don’t have the resources to dismantle it or don’t want to. The Lego strategy could be the right choice.\n\nThe team at Kong use this term because you’re essentially building on top of what you already have (like Lego blocks). Instead of switching over to microservices completely, you commit to [building new features as microservices](https://konghq.com/blog/transition-to-microservices-what-now/) while still keeping the existing monolithic codebase. While this approach doesn’t fix current issues, it will help with future expansions and buy much-needed time. This hybrid environment can exist relatively pain-free but has some risks: Increased technical debt, navigating code versions between the monolith and the new microservices features, and maintenance costs.\n\n## The nuclear option\n\nImagine: Your monolith is kaput, finito, dunzo. 
It can’t be fixed and it can’t stay. What now? As the name suggests, going nuclear is the riskiest and rarest option of all. The upside is that you can start from scratch. The downside is... you start from scratch. This approach is risky because you do run the risk of downtime when everything shifts over to microservices – which is a real no-no for user experience. Infrastructure is best when it’s invisible, and a new microservices architecture won’t win back the favor of users that were inconvenienced. Then again, maybe your new microservices architecture was built perfectly and cloud, software, and staff are perfectly in place and users will never know the difference. That’s the risk of a full rip-and-replace.\n\n## A successful transition to microservices\n\n[The team at Verizon was able to reduce its data center deploys from 30 days to _under eight hours_ by utilizing microservices](/blog/verizon-customer-story/), and their application modernization strategy centered around four key goals:\n\n*   Architecture\n*   Automation\n*   Extensibility\n*   Being proactive\n\nBy having clear goals throughout the process, the Verizon team was able to remove manual deployments and streamline their processes. When adopting a microservices model, it helps to have some clear objectives about what you would like to achieve, and prioritize certain outcomes over others. Modernization projects almost never go according to plan, and if you have to make tough decisions, having a list of ‘must-haves’ can guide the conversation.\n\nThe oldest argument for monoliths has always been their simplicity: They’re easy to build and easy to run. While it was once difficult to develop applications with a microservices architecture, over the past five years it has become considerably easier with container orchestration tools like Kubernetes, [comprehensive CI/CD tools](/solutions/continuous-integration/) that automate testing and deployments, and APIs that update automatically. 
Developers can focus on innovating rather than completing manual tasks and maintaining legacy systems. Organizations that adopt microservices get their simplicity through automated processes, and while it’s not as simple as a monolith, the benefits far outweigh the cons.\n\nRegardless of which method you choose, the willingness to modernize to the latest [DevOps](/topics/devops/) architecture is the most important first step. Ready to dive into microservices?\n\n[Just commit](/blog/application-modernization-best-practices/).\n{: .alert .alert-gitlab-purple .text-center}\n",[721,9],{"slug":4846,"featured":6,"template":700},"strategies-microservices-architecture","content:en-us:blog:strategies-microservices-architecture.yml","Strategies Microservices Architecture","en-us/blog/strategies-microservices-architecture.yml","en-us/blog/strategies-microservices-architecture",{"_path":4852,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4853,"content":4858,"config":4863,"_id":4865,"_type":14,"title":4866,"_source":16,"_file":4867,"_stem":4868,"_extension":19},"/en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"title":4854,"description":4855,"ogTitle":4854,"ogDescription":4855,"noIndex":6,"ogImage":1621,"ogUrl":4856,"ogSiteName":685,"ogType":686,"canonicalUrls":4856,"schema":4857},"Streamline DevSecOps engineering workflows with GitLab Duo","Learn all the ways GitLab Duo's AI capabilities can improve the efficiency of development workflows. 
Includes in-depth tutorials and demos.","https://about.gitlab.com/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline DevSecOps engineering workflows with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":4854,"description":4855,"authors":4859,"heroImage":1621,"date":4860,"body":4861,"category":849,"tags":4862},[3440],"2024-12-05","It's 9 a.m. somewhere, and a DevOps engineer is starting their day. They check their [GitLab todo list](https://docs.gitlab.com/ee/user/todos.html) to see any mentions or tasks assigned to them, collaborating with other stakeholders in their organization. These tasks can include:\n\n- managing infrastructure\n- maintaining the configuration of resources\n- maintaining CI/CD pipelines\n- automating processes for efficiency\n- maintaining monitoring and alerting systems\n- ensuring applications are securely built and deployed\n- modernizing applications with containerization\n\nTo carry out these tasks, DevOps engineers spend a lot of time reading documentation, writing configuration files, and searching for help in forums, issues boards, and blogs. Time is spent studying and understanding concepts, and how tools and technologies work. When they don't work as expected, a lot more time is spent investigating why. 
New tools are released regularly to solve niche or existing problems differently, which introduces more things to learn and maintain context for.\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of capabilities, fits into the workflow of DevSecOps engineers, enabling them to reduce time spent solving problems while increasing their efficiency.\n\nLet's explore how GitLab Duo helps streamline workflows.\n\n## Collaboration and communication\n\nDiscussions or requests for code reviews require spending time reading comments from everyone and carefully reviewing the work shared. GitLab Duo capabilities like Discussion Summary, Code Review Summary, and Merge Request Summary increase the effectiveness of collaboration by reducing the time required to get caught up on activities and comments, with more time spent getting the actual work done.\n\n### Merge Request Summary  \n\nWriting a detailed and clear summary of the change a merge request introduces is crucial for every stakeholder to understand what, why, and how a change was made. It's more difficult than it sounds to effectively articulate every change made, especially in a large merge request. [Merge Request Summary](https://docs.gitlab.com/ee/user/project/merge_requests/duo_in_merge_requests.html#generate-a-description-by-summarizing-code-changes) analyzes the change's diff and provides a detailed summary of the changes made.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=1i2pkyqXZGn2dSbd\" title=\"GitLab Duo Chat is now aware of Merge Requests\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Discussion Summary\n\nImagine getting pulled into an issue with more than 100 comments and a lengthy description, with different perspectives and opinions shared. 
GitLab Duo [Discussion Summary](https://docs.gitlab.com/ee/user/discussions/index.html#summarize-issue-discussions-with-duo-chat) summarizes all the conversations in the issue and identifies tasks that need to be done, reducing time spent. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IcdxLfTIUgc?si=WXlINow3pLoKHBVM\" title=\"GitLab Duo Dicussion Summary\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n#### Code Review Summary\n\nA merge request has been assigned to a DevOps engineer for review in preparation for deployment, and they have spent time reviewing several parts of the change with multiple comments and suggestions. When [submitting a review](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), a text box is presented to summarize the review, which often requires taking a pause and articulating the review. With [Code Review Summary](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), they get a concise summary automatically drafted leading to efficiency.\n\n## Manage infrastructure changes\n\nPart of a DevOps engineer's workflow is managing infrastructure changes. Infrastructure as code ([IaC](https://docs.gitlab.com/ee/user/infrastructure/iac/)) revolutionized this process, allowing for documentation, consistency, faster recovery, accountability, and collaboration. A challenge with IaC is understanding the requirements and syntax of the chosen tool and provider where the infrastructure will be created. A lot of time is then spent reviewing documentation and tweaking configuration files until they meet expectations. 
\n\nWith GitLab Duo [Code Explanation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html#code-explanation) and [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html), you can prompt GitLab Duo to create configuration files in your tool of choice and learn about the syntax of those tools. With Code Suggestions, you can either leverage [code generation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-generation), where you prompt GitLab Duo to generate the configuration, or code completion, which provides suggestions as you type while maintaining the context of your existing configurations.\n\nAs of the time this article was published, Terraform is [supported by default](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#supported-languages) with the right extensions for your IDEs. Other technologies can be supported with [additional language support configuration](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) for the [GitLab Workflow extension](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/index.html).\n\nWhere a technology is not officially supported, [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html) is the powerful AI assistant that can help generate, explain, clarify, and troubleshoot your configuration, while maintaining context from selected text or opened files. 
Here are two demos where GitLab Duo helped create IaC with Terraform and AWS CloudFormation.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ?si=Bu9jyQWwuSUcw8vr\" title=\"Manage your Infrastructure with Terraform and AI using GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Deploying AWS Lambda function using AWS Cloudformation with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Configuration management\n\nOnce your infrastructure is up, GitLab Duo Chat can also help create configuration files and refactor existing ones. These can be Ansible configurations for infrastructure or cloud-native configurations using Docker, Kubernetes, or Helm resource files. 
In the videos below, I demonstrate how GitLab Duo helps with Ansible, containerization, and application deployment to Kubernetes.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/t6ZCq_jkBwY?si=awCUdu1wCgOO21XR\" title=\"Configuring your Infrastructure with Ansible & GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Containerizing your application with GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uroSxvMFqPU?si=GMNC7f2b7i_cjn6F\" title=\"Deploying your application to Kubernetes with Help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9yGDM00RlUA?si=kE5JZD_OEFcxeR7E\" title=\"Deploying to Kubernetes using Helm with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Test, test, test\n\nWriting tests is an important part of building secure software, but it can be a chore and often becomes an afterthought. 
You can leverage the power of GitLab Duo to [generate tests for your code](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) by highlighting your code and typing the `/tests` in the Chat panel of your IDE.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/zWhwuixUkYU?si=wI93j90PIiUMyGcV\" title=\"GitLab Duo Test Generation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD pipeline troubleshooting\n\nAutomation is an essential part of the DevOps engineer's workflow, and Continuous Integration/Deployment ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) is central to this. You can trigger CI jobs on code push, merge, or on schedule. But, when jobs fail, you spend a lot of time reading through the logs to identify why, and for cryptic errors, it can take more time to figure out. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/) analyzes your failed job log and errors, and then recommends possible fixes. This reduces the time spent investigating the errors and finding a fix.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Sa0UBpMqXgs?si=IyR-skz9wJMBSicE\" title=\"GitLab Duo Root Cause Analysis\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Building secure applications\n\nPart of software development includes discovering vulnerabilities, either in the application or its dependencies. Some vulnerabilities are easy to fix, while others require creating a milestone with planning. 
GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) reduce the time spent researching and fixing vulnerabilities. Vulnerability Explanation explains why a vulnerability is happening, its impact, and how to fix it, helping the DevOps engineer to upskill. Vulnerability Resolution takes it further – instead of just suggesting a fix, it creates a merge request with a fix for the vulnerability for you to review. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MMVFvGrmMzw?si=Fxc4SeOkCBKwUk_k\" title=\"GitLab Duo Vulnerability Explanation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=XT3Qz5SsX-ISfCyq\" title=\"GitLab Duo Vulnerability resolution\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More work done with less stress\n\nWith GitLab Duo, DevOps engineers can do more work deploying and maintaining secure applications, while acquiring more skills with the detailed responses from GitLab Duo Chat.\n\n> [Sign up for a free trial of GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started today!",[851,9,696,495,917,693],{"slug":4864,"featured":91,"template":700},"streamline-devsecops-engineering-workflows-with-gitlab-duo","content:en-us:blog:streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","Streamline Devsecops Engineering Workflows With Gitlab 
Duo","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"_path":4870,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4871,"content":4877,"config":4882,"_id":4884,"_type":14,"title":4885,"_source":16,"_file":4886,"_stem":4887,"_extension":19},"/en-us/blog/tech-debt",{"title":4872,"description":4873,"ogTitle":4872,"ogDescription":4873,"noIndex":6,"ogImage":4874,"ogUrl":4875,"ogSiteName":685,"ogType":686,"canonicalUrls":4875,"schema":4876},"How to use DevOps to pay off your technical debt","Technical debt is a universal problem with an equally universal solution – DevOps. Here's how DevOps can reduce the tech debt burden and help you deploy faster and more frequently.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681643/Blog/Hero%20Images/greenery.jpg","https://about.gitlab.com/blog/tech-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use DevOps to pay off your technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-10-05\",\n      }",{"title":4872,"description":4873,"authors":4878,"heroImage":4874,"date":4879,"body":4880,"category":1040,"tags":4881},[1245],"2020-10-05","\n\nOne of the primary resource constraints in the [DevOps](/topics/devops/) world is technical debt. 
Technical debt is a metaphor created by Ward Cunningham that compares the build-up of cruft (deficiencies in the internal quality of software systems) to the accumulation of financial debt, where the effort it takes to add new features is the interest paid on the debt, writes [Martin Fowler](https://martinfowler.com/bliki/TechnicalDebt.html).\n\nIt’s common for a busy developer to write code with known imperfections, but because the priority is to ship new features as quickly as possible, deliverables are often prioritized over correcting the inefficiencies in the process.\n\nOne of the major dilemmas with determining the value of spending precious time fixing cruft versus building new features is that the costs are not objectively measurable, says Fowler. Just like with paying off financial debt, the right call is largely circumstantial as opposed to absolute.\n\n\"Given this, usually the best route is to do what we usually do with financial debts, pay the principal off gradually,\" writes Fowler.\n\nBy cleaning up some of the cruft as you work on the new features, you ensure that the most relevant code is tidier for future iterations. When it comes to crufty, but stable, code, you can leave it alone. 
This method is similar to paying the monthly balance on a low interest rate loan – the impact is minimal.\n\n \"In contrast, areas of high activity need a zero-tolerance attitude to cruft, because the interest payments are cripplingly high,\" writes Fowler.\n\nOne way to start dealing with technical debt is to conduct a rough audit and triage your technical debt by \"interest rate\" – high interest rate cruft is addressed with the same priority as shipping new features, while medium-to-low interest rate cruft can be dealt with in a ratio that best suits your team’s situation, because dealing with your most urgent technical debt sooner rather than later will help you save resources in the long-term.\n\n## How tech debt accumulates in your workflow\n\nIt’s not just code that contains cruft. A lot of the time, we have cruft that slows down our engineering processes. When it comes to investing time and money into updating DevOps processes, it seems there is never enough of either resource.\n\n\"We don’t let our teams spend time on improving their process because we think it’s wasted effort,\" says [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab. \"But if you can spend a day fixing some things that make your workflow inefficient, and you save an hour a week from now until eternity, that’s a big difference.\"\n\nTake for instance manual deployment versus the use of automated pipelines. We know that deploying manually takes an enormous amount of time, but the upfront cost of allocating time to building automated pipelines can seem daunting.\n\nIf your team is trapped in a time-consuming cycle of technical debt, take a peek at how Minnesota-based consulting firm, [BI Worldwide](/customers/bi-worldwide/) (BIW), was able to accelerate deployments by transitioning to GitLab. 
In the case study, the BIW Corporate Products Development Team explains how they were stuck in a rut of manual testing and manual deployments on their on-prem infrastructure. Their toolchains were complex and inefficient, which created a dense backlog.\n\n\"It was entirely time-consuming to apply all of those code changes,\" said Adam Dehnel, product architect, BIW, in the case study. As a result, deployments were infrequent and slow as too many features were crammed into each release.\n\nThe first step to increase the speed of their deployments was to update and modernize their processes.\n\n\"[BIW] had practices and tools in place at the time but were spending time on items that weren’t business differentiating features. They faced classic issues surrounding a lack of cross-team communication including inefficient mechanisms for intra-organization workflows and individualized toolsets.\"\n\nFirst, BIW made the painful transition from CVS to Git. Next, the company aimed to automate the build, test, and deployment process and built a toolchain with tools such as GitHub, Jenkins, JIRA, and Confluence.\n\nFor BIW, this complex toolchain was buggy. One thing that was not mentioned in this specific use case, but still merits recognition, is the hidden cost of maintaining all of these different tools.\n\n\"The argument to be made there is not only is it cost of using these various tools, but also that the more tools you have, there is the overhead cost of upgrading them, maintaining them, and integrating them,\" says Brendan. 
\"There’s a massive hidden cost behind the cost of doing business.\"\n\nIn the next iteration, BIW embraced the efficiency of an all-in-one tool by transitioning to GitLab.\n\nBIW went from a pre-Git pace of shipping a release every nine to 12 months to deploying nearly ten times a day using GitLab Ultimate, no doubt putting a serious dent in the technical debt that followed their slower, laborious release cycle.\n\n## Conserve valuable resources and pay off technical debt with DevOps\n\nIn a previous blog post, we examined [communication strategies to get non-technical stakeholders to buy-in to DevOps](/blog/devops-stakeholder-buyin/). DevOps can help you deploy faster and more frequently, giving your business an edge over the competition, but it is also a strategy for paying off your technical debt. By first taking into account inefficiencies in your code and engineering processes, you can make a rough triage of your team's technical debt. This type of audit is the first step to identifying cruft you can trim to help speed up your cycle time, clear your backlog, and modernize your engineering processes.\n\n## Read more\n\n- [Need DevOps buy-in? Here's how to convince stakeholders](/blog/devops-stakeholder-buyin/)\n- [A guide to cloud native storage for beginners](/blog/cloud-native-storage-beginners/)\n- [Want to iterate faster? 
Choose boring solutions](/blog/boring-solutions-faster-iteration/)\n\nCover Photo by [Vadim L](https://unsplash.com/@sk3tch?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/plants?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[1064,999,721,9],{"slug":4883,"featured":6,"template":700},"tech-debt","content:en-us:blog:tech-debt.yml","Tech Debt","en-us/blog/tech-debt.yml","en-us/blog/tech-debt",{"_path":4889,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4890,"content":4896,"config":4901,"_id":4903,"_type":14,"title":4904,"_source":16,"_file":4905,"_stem":4906,"_extension":19},"/en-us/blog/ten-reasons-why-your-business-needs-ci-cd",{"title":4891,"description":4892,"ogTitle":4891,"ogDescription":4892,"noIndex":6,"ogImage":4893,"ogUrl":4894,"ogSiteName":685,"ogType":686,"canonicalUrls":4894,"schema":4895},"10 Reasons why your business needs CI/CD","Want to know why you should consider using CI/CD? Learn more here about the many business benefits of adopting a CI/CD workflow for you and your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663779/Blog/Hero%20Images/cicd-2018_blogimage.jpg","https://about.gitlab.com/blog/ten-reasons-why-your-business-needs-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 Reasons why your business needs CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-02-15\",\n      }",{"title":4891,"description":4892,"authors":4897,"heroImage":4893,"date":4898,"body":4899,"category":1040,"tags":4900},[1037],"2022-02-15","\nThere’s no escape: Your company is in the software business, even if it’s not. 
\n\nCompetitors, customers, investors, and employees are all demanding updated software on a regular basis, alongside whatever products your organization creates.\n\nSo embrace the reality (and [DevOps](/topics/devops/)) and invest in creating the most efficient continuous integration and delivery pipelines possible. Not sure how to sell this strategy to management? Start by pointing out it’s likely your closest competitor is already taking advantage of [continuous integration/continous delivery](/topics/ci-cd/)(CI/CD). And if you need more ammunition, here are 10 reasons why your business needs CI/CD.\n\n## What is CI/CD?\n\nCI/CD is a two-step process that dramatically streamlines code development and delivery using the power of automation. CI makes developer tasks like source code integration and version control more efficient so software can get into production faster. CD automates software testing and deployment. Together, CI/CD is a powerful and unmatched engine of modern software development and it has untold benefits for businesses.\n\n## What are the CI/CD benefits for business?\n\nCI/CD has numerous benefits for business. Here are 10 reasons to adopt CI/CD: \n\n* Ensure superior code quality\n\nIn our [2021 Global DevSecOps Survey](/developer-survey/), participants told us the number one reason to do DevOps is for code quality and, of course, the number one process teams need for DevOps is CI/CD. Because CI/CD pipelines offer test automation, developers can know about code problems nearly in real time. That concept of “failing fast” means teams aren’t wasting time or resources with buggy code, and devs aren’t plagued with endless “fix” requests when they’ve moved on to other projects. 
Time is saved, money is saved, and developers aren’t endlessly context switching… win, win, win.\n\n* Deliver faster with an accelerated release rate\n\nSkeptics about the benefits of CI/CD need only hear about global financial firm Goldman Sach’s success story: It’s Technology Division went from [one code build every two weeks to over 1,000 builds per day](/customers/goldman-sachs/). A unified CI/CD pipeline is like a turbo engine when it comes to boosting the rate of software releases. The faster code is released, the more new code can be developed, and then released, ad infinitum. The business bottom line: Expensive developer resources aren’t sitting idle when a successful CI/CD pipeline is in play.\n\n* CI/CD pipelines: Automation reduces the cost\n\nAnytime a human does not have to intervene in the software development process, time, and thus money, are saved. That’s why automation is the underpinning to successful DevOps practices. CI/CD automates the handoffs, the source code management, the version control system, the deployment mechanisms, and, of course, so much of the testing. \n\nOf all those, [testing](/blog/want-faster-releases-your-answer-lies-in-automated-software-testing/) is arguably the most important. In our 2021 survey, testing was identified as the number one reason releases were delayed. Not only do delayed releases impact the business from a cost, branding, public relations, and even a reputation perspective, they are deadly to businesses relying on speedy time-to-market. Historically software testing was manual and incredibly time-consuming, which is why companies only released new code once or twice a year. 
In today’s world, companies have to release all the time, and automated software testing is critical to making that possible.\n\n* Fault isolation\n\nBefore DevOps and CI/CD gained traction in software development, development teams would know there was an issue with code, but would struggle to know exactly *where* the problem was happening. CI/CD and its automated testing has changed that. Developers can easily identify and then isolate code faults, dramatically improving productivity. \n\n* Simplified rollback\n\nA CI/CD pipeline gives developers the power to fail fast and recover even faster. It’s a simple process to push code into production and, if there are issues, simply roll it back. The ability to easily rollback code saves teams time, energy, and resources and leads to faster fixes of problem code. \n\n* Continuous feedback\n\nA unified CI/CD process, operating as part of a DevOps platform, gives everyone on the team – including business stakeholders – a way to see what’s happening, where it’s happening, and what might be going wrong. This sounds like a simple thing, but in reality, a single window into software development is almost revolutionary.\n\nIn the past, there were simply _so many tools_ in play that a project manager might have to look in a number of places, and ask a number of people, to get status updates. Developers and operations pros fared no better. Obviously that was a waste of time and resources, particularly when problems arose. \n\n* Optimum transparency and accountability\n\nThanks to continuous feedback, a CI/CD pipeline makes the entire software development process completely transparent to the business side. Product managers can check project status in a glance and track accountability as needed. \n\n* Improved mean time to resolution (MTTR)\n\nThanks to the visibility provided by a CI/CD pipeline, DevOps teams see issues quickly and can fix them fast. 
The ability to rapidly resolve problems lies at the heart of a key development metric: mean time to resolution, or MTTR. The better the MTTR, the more efficiently the DevOps team is working and the more quickly software can be released; in other words, MTTR has a direct effect on a business’s bottom line. \n\n* Monitoring metrics data\n\nTeams and the business side need to know how code is functioning in the real world, but in traditional software development practices monitoring metrics are often absent. In an ideal world, teams would know there was a code problem and roll it back long before end users realized it. A CI/CD pipeline makes that “ideal world” a reality by [delivering continuous feedback on a variety of metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/). Access to metrics data is more than just a time-saver, however, as no organization wants to be associated with bug-ridden code and applications that don’t perform well. \n\n* Reduction of non-critical defects in backlog\n\nBy now it’s clear CI/CD is a time and money saver, so much so that it gives developers time to work on things they wouldn’t normally be able to, such as going back to fix older code and make it cleaner and more efficient. 
The idea that devs cannot only tackle the backlog (it’s called a backlog for a reason after all – who has time for this?), but also work on non-critical defects, is a game-changer brought to teams by DevOps and CI/CD.\n",[721,9,873],{"slug":4902,"featured":6,"template":700},"ten-reasons-why-your-business-needs-ci-cd","content:en-us:blog:ten-reasons-why-your-business-needs-ci-cd.yml","Ten Reasons Why Your Business Needs Ci Cd","en-us/blog/ten-reasons-why-your-business-needs-ci-cd.yml","en-us/blog/ten-reasons-why-your-business-needs-ci-cd",{"_path":4908,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4909,"content":4915,"config":4921,"_id":4923,"_type":14,"title":4924,"_source":16,"_file":4925,"_stem":4926,"_extension":19},"/en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"title":4910,"description":4911,"ogTitle":4910,"ogDescription":4911,"noIndex":6,"ogImage":4912,"ogUrl":4913,"ogSiteName":685,"ogType":686,"canonicalUrls":4913,"schema":4914},"Test all the things in GitLab CI with Docker by example","Running tests is easier than you think – guest author Gabriel Le Breton shares his presentation about testing everything automatically with GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680261/Blog/Hero%20Images/test-all-the-things-in-gitlab-ci-with-docker-by-example.jpg","https://about.gitlab.com/blog/test-all-the-things-gitlab-ci-docker-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Test all the things in GitLab CI with Docker by example\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Le Breton\"}],\n        \"datePublished\": \"2018-02-05\",\n      }",{"title":4910,"description":4911,"authors":4916,"heroImage":4912,"date":4918,"body":4919,"category":718,"tags":4920},[4917],"Gabriel Le Breton","2018-02-05","\n\nDo you write tests? Or do you skip them because it’s too complicated to run? 
Or maybe developers on your team just don’t care? You should take a few minutes and set up CI so you can enforce good practices. Good news, you can test [all the things](http://knowyourmeme.com/memes/all-the-things) automagically in [GitLab CI/CD](/solutions/continuous-integration/) with Docker and very little effort 🤘\n\n\u003C!-- more -->\n\nI recently gave a presentation at the [SagLacIO](http://saglac.io/) about [GitLab CI/CD](/solutions/continuous-integration/).\n\n## Getting started\n\nFirst, you’ll need an account at [GitLab.com](https://gitlab.com/). If you don’t already have one, you can open an account with no problem. [GitLab’s free tier](/stages-devops-lifecycle/) gives you a ton of features, unlimited free hosted repositories, 2,000 CI build minutes per month, etc. You can even use your own task runners in case you bust that limit.\n\n### Useful links\n\n- [GitLab.com](https://gitlab.com/)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/) 📗\n- [.gitlab-ci.yml documentation](https://docs.gitlab.com/ee/ci/yaml/) 📕\n- [.gitlab-ci.yml linter: gitlab.com/ci/lint](https://gitlab.com/ci/lint/) ✅\n- [gitlab-ci nodejs example project](https://gitlab.com/gableroux/gitlab-ci-example-nodejs)\n- [gitlab-ci Docker example project](https://gitlab.com/gableroux/gitlab-ci-example-docker)\n- [gitlab-ci django example project](https://gitlab.com/gableroux/gitlab-ci-example-django)\n- [Unity3D Docker project](https://gitlab.com/gableroux/unity3d) running in gitlab-ci and published to [Docker Hub](https://hub.docker.com/r/gableroux/unity3d/)\n- [How to publish Docker images to Docker Hub from gitlab-ci on Stack Overflow](https://stackoverflow.com/questions/45517733/how-to-publish-docker-images-to-docker-hub-from-gitlab-ci)\n\n## Here go the slides\n\nScroll through the slides from my presentation on GitLab CI/CD at SagLacIO, you’ll have fun 🤘\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe 
src=\"https://docs.google.com/presentation/d/10835yig54EbR_OQcxSXURkPk_0zkhLxaWHdRdXb-yWw/embed?start=false&amp;loop=false&amp;delayms=3000\" frameborder=\"0\" width=\"1280\" height=\"749\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nIf you have suggestions, feel free to poke me or [open an issue](https://github.com/GabLeRoux/gableroux.github.io/issues).\n\n *[Test all the things in GitLab CI with Docker by example](https://gableroux.com/saglacio/2018/01/16/test-all-the-things-in-gitlab-ci-with-docker-by-example/) was originally published on gableroux.com.*\n\n *Cover photo by [Federico Beccari](https://unsplash.com/photos/ahi73ZN5P0Y?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)*\n {: .note}\n",[763,9],{"slug":4922,"featured":6,"template":700},"test-all-the-things-gitlab-ci-docker-examples","content:en-us:blog:test-all-the-things-gitlab-ci-docker-examples.yml","Test All The Things Gitlab Ci Docker Examples","en-us/blog/test-all-the-things-gitlab-ci-docker-examples.yml","en-us/blog/test-all-the-things-gitlab-ci-docker-examples",{"_path":4928,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4929,"content":4935,"config":4940,"_id":4942,"_type":14,"title":4943,"_source":16,"_file":4944,"_stem":4945,"_extension":19},"/en-us/blog/three-teams-left-jenkins-heres-why",{"title":4930,"description":4931,"ogTitle":4930,"ogDescription":4931,"noIndex":6,"ogImage":4932,"ogUrl":4933,"ogSiteName":685,"ogType":686,"canonicalUrls":4933,"schema":4934},"3 Teams left Jenkins: Here’s why","How three different teams – Alteryx, ANWB, and EAB – shifted away from Jenkins for smoother sailing with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671932/Blog/Hero%20Images/jenkins-to-gitlab-sailboat.jpg","https://about.gitlab.com/blog/three-teams-left-jenkins-heres-why","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Teams left Jenkins: Here’s why\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-07-23\",\n      }",{"title":4930,"description":4931,"authors":4936,"heroImage":4932,"date":4937,"body":4938,"category":1040,"tags":4939},[823],"2019-07-23","\nAs many companies know, continuous integration and build processes are challenging. Complex tool\nintegrations, pieced-together pipelines, and overall system breakdowns are time consuming for\neven the most experienced teams. The longer it takes for system recovery, the more costly it\nbecomes, creating more risk for the organization as a whole. Competitive companies are always on\nthe lookout for better solutions and they're increasingly turning to GitLab to do just that.\n\nThree companies – Alteryx, ANWB, and EAB – all experienced unique challenges with Jenkins.\nWe highlight how each of these teams made the successful move to\n[GitLab from Jenkins](/solutions/jenkins/). Learn how each team\naccelerated deployment, improved CI/CD pipelines, created developer transparency, and\nalleviated toolchain stressors after making the switch to GitLab.\n\n## Alteryx: Builds down from 3 hours to 30 minutes\n\nAlteryx, a prominent end-to-end analytics platform, was using a legacy system with Jenkins\nthat was older, clunky, and difficult to manage. The team was looking to modernize their architecture\nand to improve their overall software development lifecycle.\n\nThey turned to GitLab because it offers many solutions in one tool. With GitLab, the Alteryx team is now\ncapable of managing source code, CI/CD, code reviews, and security scanning all in one place.\nA build that took three hours with Jenkins is now just 30 minutes in GitLab.\n\nAs Alteryx continues to grow in the analytics space, GitLab will continue to add new features\nto support the company's expanding needs. 
Learn more about [Alteryx’s journey](/customers/alteryx/).\n\n## ANWB: Increased deployments\n\nWith over 4.4 million members, ANWB offers services for credit cards, bicycle maintenance,\ncar sales, and travel throughout the Netherlands. Both the mobile and web development\nteams have their hands full with popular offerings like mapping and driver intelligence services.\n\nANWB was struggling with an outdated toolchain that included Jenkins version 1 as a build server.\nThe company wanted to speed up development, eliminate isolated and outdated processes and give\nits teams autonomy.\n\nWith GitLab, ANWB can now manage separate teams, increase deployments, and support a culture\nwhere everyone contributes freely to colleagues' code repositories. ANWB has plans to move toward a\ncloud-centric framework and GitLab has helped to pave that road. Learn more about [ANWB’s path to success](/customers/anwb/).\n\n## EAB: \"Quality first\" culture\n\nServing over 1,500 schools, colleges, and universities, EAB uses data analytics and transformative\nmeasures to help students stay enrolled in education. The EAB team had to rely on several tools,\nincluding Jenkins, which made continuous integration overly complex and time consuming.\nDevelopers wanted to consolidate their various tools to create faster builds with much less maintenance.\n\nEAB initially turned to GitLab because of our regular feature releases and [tiered (and affordable) pricing](/pricing/).\nThe EAB development team soon realized they could have a steady pace of\nbuild releases without having to use multiple tools to make it happen. 
In just six months, workflow increased\nand the company plans to continue to roll out a \"quality first\" culture using GitLab as a guide.\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch the [Migrating from Jenkins to GitLab](https://www.youtube.com/watch?v=RlEVGOpYF5Y) demo\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\nCover image by [Fab Lentz](https://unsplash.com/@fossy) on [Unsplash](https://unsplash.com)\n{: .note}\n",[763,828,9],{"slug":4941,"featured":6,"template":700},"three-teams-left-jenkins-heres-why","content:en-us:blog:three-teams-left-jenkins-heres-why.yml","Three Teams Left Jenkins Heres Why","en-us/blog/three-teams-left-jenkins-heres-why.yml","en-us/blog/three-teams-left-jenkins-heres-why",{"_path":4947,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4948,"content":4954,"config":4958,"_id":4960,"_type":14,"title":4961,"_source":16,"_file":4962,"_stem":4963,"_extension":19},"/en-us/blog/three-yaml-tips-better-pipelines",{"title":4949,"description":4950,"ogTitle":4949,"ogDescription":4950,"noIndex":6,"ogImage":4951,"ogUrl":4952,"ogSiteName":685,"ogType":686,"canonicalUrls":4952,"schema":4953},"3 YAML tips for better pipelines","Learn how to get the most out of your YAML configs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681626/Blog/Hero%20Images/yaml-tips.jpg","https://about.gitlab.com/blog/three-yaml-tips-better-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 YAML tips for better pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-10-01\",\n      
}",{"title":4949,"description":4950,"authors":4955,"heroImage":4951,"date":1776,"body":4956,"category":1040,"tags":4957},[715],"At GitLab, we’re fans of YAML. But for all of its benefits, we’d be lying if\nwe said YAML hasn’t caused its fair share of headaches, too.\n\n\n[YAML](https://yaml.org/) is used industry-wide for declarative\nconfiguration. YAML offers flexibility and simplicity, as long as you know\nthe rules and limitations. Since YAML is platform-agnostic, knowing best\npractices around YAML configurations is a transferable skillset in a cloud\nnative world.\n\n\n## What are the benefits of YAML?\n\n\nYAML is a data serialization language designed to be human-friendly. YAML is\neasy to use in a text editor, has a simple syntax that works across\nprogramming languages, and can store a lot of important configuration data\n(typically in a .yml or .yaml file).\n\n\n[YAML is data-oriented](https://blog.stackpath.com/yaml/) and has features\nderived from Perl, C, HTML, and others.\n\n\nBecause YAML is a superset of JSON, it has built-in advantages including\ncomments, self-referencing, and support for complex data types.\n\n\nA [YAML file uses declarative\nconfiguration](https://www.codeproject.com/Articles/1214409/Learn-YAML-in-five-minutes)\nto describe a variety of structures, such as API data structures and even\ndeployment instructions for virtual machines and containers, to name a few.\n\n\nYAML is comprehensive, widely-used, and works in every type of development\nenvironment.\n\n\n## YAML tip #1: Let other tools do the formatting for you\n\n\nYAML is one of those languages where it’s minimalism is both a blessing and\na curse, depending on who you ask. It also relies on the syntactically\nsignificant whitespace that is a source of [heated\ndebate](https://wiki.c2.com/?SyntacticallySignificantWhitespaceConsideredHarmful)\namong developers. 
For a language where formatting is king, what can
Teams adopt linters and other static\ntools by integrating them into their integrated development environment\n(IDE) of choice, and/or by running them as an additional step in their\ncontinuous integration (CI).\n\n\nIn GitLab, we have a [CI\nlint](https://docs.gitlab.com/ee/ci/lint.html#validate-basic-logic-and-syntax)\nthat checks the syntax of your CI YAML configuration that also runs some\nbasic logical validations.\n\n\nTo use the CI lint, paste a complete CI configuration (`.gitlab-ci.yml` for\nexample) into the text box and click `Validate`:\n\n\n![GitLab CI lint](https://docs.gitlab.com/ee/ci/img/ci_lint.png)\n\n\n## YAML tip #2: Keep it simple\n\n\nIt’s easy to overwhelm the minimalism of a YAML file by including too many\ndetails, or by being inconsistent with formatting. When it comes to YAML,\nless is often more.\n\n\nIt isn’t necessary to specify every single attribute. `Job timeout` is an\nexample of an attribute that can be left out, since this is something that\nis sometimes specified elsewhere. An example in GitLab is\n[interruptible](https://docs.gitlab.com/ee/ci/yaml/#interruptible), which is\nused to indicate that a job should be canceled if made redundant by a newer\npipeline run. Since this defaults to `false` it’s not always necessary to\ninclude it.\n\n\nSome people indent gratuitously when writing YAML to help themselves\nvisualize large chunks of data. To better visualize how data works together,\nit might be helpful to create a \"pseudo-config\" before committing the code\nto YAML. On the [Red Hat blog](https://www.redhat.com/sysadmin/yaml-tips), a\npseudo-config is described as pseudo-code where you don't have to worry\nabout structure or indentation, parent-child relationships, inheritance, or\nnesting. 
Just write the data down as you understand it.\n\n\n![Red Hat pseudo\nconfig](https://www.redhat.com/sysadmin/sites/default/files/inline-images/pseudoyaml.jpg)\n\n\nOnce you understand how the data correlates, then you can commit it to YAML.\n\n\nRegardless of how you define simplicity in your workflow, try to keep YAML\nconfigs uncluttered and include only the necessary data. And if you’re not\nsure what data is necessary, write out a pseudo-config to help you visualize\nit.\n\n\n\n\n## YAML tip #3: Reuse config when possible\n\n\nStarting from scratch is a lot of wasted effort, and YAML is no exception.\nOne of the best parts of YAML is its reusabilty, and reusing config is a way\nto keep files consistent within an organization.\n\n\nOne way to [avoid duplicated\nconfiguration](https://docs.gitlab.com/ee/ci/yaml/#include) is by using the\n`include` keyword, which allows the inclusion of external YAML files. For\nexample, global default variables for all projects that don’t need to be\nmodified for every file. The `include` keyword helps to break down a YAML\nconfiguration into multiple files and boosts readability, especially for\nlong files. It’s also possible to have template files stored in a central\nrepository and projects included in their configuration files.\n\n\n`extends` is a great way to reuse some YAML config in multiple places, for\nexample:\n\n\n```\n\n.image_template:\n  image:\n    name: centos:latest\n\ntest:\n  extends: .image_template\n  script:\n    - echo \"Testing\"\n\ndeploy:\n  extends: .image_template\n  script:\n    - echo \"Deploying\"\n```\n\n\nYAML has a handy feature called\n[anchors](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#anchors),\nwhich lets you easily duplicate content across your document. Anchors can be\nused to duplicate/inherit properties, and is a perfect example to be used\nwith [hidden jobs](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs) to provide\ntemplates for your jobs. 
When there are duplicate keys, GitLab will perform a
[Try\nGitLab free](/free-trial/).\n\n\nCover image by [Harits Mustya\nPratama](https://unsplash.com/@haritsmustya?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/greenhouse?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n\n\n## Related content\n\n[GitLab CI/CD pipeline configuration\nreference](https://docs.gitlab.com/ee/ci/yaml)\n\n\n[Unlock better DevOps with GitLab\nCI/CD](https://about.gitlab.com/blog/better-devops-with-gitlab-ci-cd/)\n\n\n[Pipeline\nefficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html)\n",[9,721],{"slug":4959,"featured":6,"template":700},"three-yaml-tips-better-pipelines","content:en-us:blog:three-yaml-tips-better-pipelines.yml","Three Yaml Tips Better Pipelines","en-us/blog/three-yaml-tips-better-pipelines.yml","en-us/blog/three-yaml-tips-better-pipelines",{"_path":4965,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4966,"content":4971,"config":4977,"_id":4979,"_type":14,"title":4980,"_source":16,"_file":4981,"_stem":4982,"_extension":19},"/en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"title":4967,"description":4968,"ogTitle":4967,"ogDescription":4968,"noIndex":6,"ogImage":1281,"ogUrl":4969,"ogSiteName":685,"ogType":686,"canonicalUrls":4969,"schema":4970},"Top 10 GitLab technical blogs of 2023","2023 was a big year! 
Catch up on expert insights into DevSecOps, AI, CI/CD, and more.","https://about.gitlab.com/blog/top-10-gitlab-technical-blogs-of-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab technical blogs of 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2024-01-09\",\n      }",{"title":4967,"description":4968,"authors":4972,"heroImage":1281,"date":4974,"body":4975,"category":741,"tags":4976},[4973],"Sandra Gittlen","2024-01-09","2023 brought fresh insights from experts across GitLab and beyond —  all of them focused on the challenges and opportunities facing DevSecOps teams. From Lockheed Martin to CARFAX, organizations are trying to understand and unlock the power of technologies such as artificial intelligence (AI), CI/CD, security automation, and more. Our experts provided tips, best practices, and tutorials to use throughout the software development lifecycle.\n\nHere are the top 10 technical blogs from what was an incredible year in DevSecOps innovation.\n\n**1. [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)**\nLooking for a smooth transition from Jenkins to GitLab? Follow this step-by-step tutorial to learn how GitLab's integrated CI/CD capabilities help deliver high-quality software faster.\n\n**2. [U.S. Navy Black Pearl: Lessons in championing DevSecOps](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/)**\nSigma Defense's director of engineering details what it's like to manage the U.S. Navy's Black Pearl, which uses GitLab as its DevSecOps platform. The DevSecOps champion relays his experience implementing DevSecOps and the benefits of that decision.\n\n**3. 
[Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)**\nEnabling developers to work in their preferred environments empowers DevSecOps teams to build and deliver software more efficiently. With these quickstart instructions, developers can create a workspace, use the Web IDE Terminal to install dependencies or start their server, and view their running application.\n\n**4. [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)**\nCI/CD catalogs are a game-changer, allowing developers to discover, integrate, and share pre-existing CI/CD components with ease. This tutorial shows how to get the most from this new DevSecOps platform feature.\n\n**5. [Combine GitLab Flow and GitLab Duo for a workflow powerhouse](https://about.gitlab.com/blog/gitlab-flow-duo/)**\nGitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency that can lead to higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. Find out how with this step-by-step guide.\n\n**6. [Efficient DevSecOps workflows: Hands-on python-gitlab API automation](https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/)**\nThe python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.\n\n**7. [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)**\nAt GitLab, we believe in the power of MLOps, especially when combined with DevSecOps. So follow along as our data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.\n\n**8. 
[Explore the Dragon Realm: Build a C++ adventure game with a little help from AI](https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)**\nReaders are invited to create a mystical world while learning how to integrate AI into their coding environment. This tutorial demonstrates how to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++. \n\n**9. [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)**\nThe GitLab Red Team conducts security exercises that simulate real-world threats. They apply professional development practices to using the same open source C2 tools as threat actors. In this tutorial, the GitLab Red Team shares how they implement continuous testing for the Mythic framework, their design philosophy, and a public project that can be forked for use by other Red Teams.\n\n**10. [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)**\nThe design of GitLab Dedicated, our single-tenancy SaaS version of the DevSecOps platform, came from the lessons learned while building GitLab.com. 
In this peek behind the curtains, learn the considerations that sparked different decisions regarding automation, databases, monitoring, availability, and more – and what the outcome was.\n\nSign up for the GitLab newsletter using the form to the right to receive the latest blogs right in your inbox.\n",[851,9,495,696,697,917],{"slug":4978,"featured":91,"template":700},"top-10-gitlab-technical-blogs-of-2023","content:en-us:blog:top-10-gitlab-technical-blogs-of-2023.yml","Top 10 Gitlab Technical Blogs Of 2023","en-us/blog/top-10-gitlab-technical-blogs-of-2023.yml","en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"_path":4984,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4985,"content":4991,"config":4996,"_id":4998,"_type":14,"title":4999,"_source":16,"_file":5000,"_stem":5001,"_extension":19},"/en-us/blog/trends-in-test-automation",{"title":4986,"description":4987,"ogTitle":4986,"ogDescription":4987,"noIndex":6,"ogImage":4988,"ogUrl":4989,"ogSiteName":685,"ogType":686,"canonicalUrls":4989,"schema":4990},"3 Trends in test automation","Faster deployments, fewer bugs, better user experiences – see the latest trends in test automation and what they're bringing to the table.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663662/Blog/Hero%20Images/trends-in-test-automation.jpg","https://about.gitlab.com/blog/trends-in-test-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Trends in test automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-01\",\n      }",{"title":4986,"description":4987,"authors":4992,"heroImage":4988,"date":4993,"body":4994,"category":1040,"tags":4995},[715],"2019-05-01","\nAutomation is becoming a powerful tool in every industry.\nWith the pace of development at breakneck speed, [test automation](/topics/devops/devops-test-automation/) is a big asset in 
deploying applications quickly.\nThe volume and complexity of testing environments mean that machines are well-suited for the job, and a modern QA strategy is all about leveraging that automation effectively.\n\n[QASymphony recently surveyed testers and QA leaders](https://www.qasymphony.com/blog/test-automation-trends-infographic/) at mid-size and large enterprises and found that a significant number of respondents expect to be making a big leap towards test automation in the next year:\nAlmost half expect to be automating more than 50 percent in that time.\nThe test automation tool landscape is growing more complex, and 83 percent of organizations are using open source tools.\n\n## 1. Continuous testing\n\nIn traditional environments, testing gets completed at the end of a development cycle.\nAs more teams move toward a [DevOps](/topics/devops/) and [continuous delivery](/topics/ci-cd/) model in which software is constantly in development, leaving testing until the end can be a huge liability.\nIn the time between a project starting and going to testing, master files could have been changed thousands of times.\nWho knows what kinds of bugs can pop up over months of development?\nThis leads to either updates stuck in testing for far too long or deployments filled with bugs – neither of which is good.\nThat’s where continuous testing comes in.\n\nContinuous testing starts at the beginning.\nEach milestone along the way serves as a quality gate, [baking in excellence at each stage of the software development process](https://techbeacon.com/app-dev-testing/state-test-automation-7-key-trends-watch).\nAs each phase clears, more testing happens as needed.\nImplementing continuous testing methodologies is _already_ the biggest trend in test automation, but some organizations that embark on their DevOps journeys struggle with it.\n\nSubu Baskaran, senior product manager for Sencha, says that despite the desire to test early in the cycle, software development teams that 
are still maintaining legacy applications find it hard to go back and write unit or end-to-end tests:\n\n>\"The millions of lines of code make it extremely difficult for teams to think about unit testing, as that will severely hamper new feature development. Also, legacy applications have inherent complexities that make end-to-end testing very slow, vague, and brittle. [Hence, teams that maintain legacy applications resort to manual testing.](https://techbeacon.com/app-dev-testing/state-continuous-testing-its-journey-not-destination)\"\n\n## 2. Concurrent DevOps\n\nCode quality and speed go hand in hand, and teams must be able to make use of parallelization to keep up the pace.\nSplitting work across multiple servers has never been easier, and organizations will continue to expand their concurrent DevOps approach.\n\nYou could have multiple physical machines to handle the load but [VMs can be a more economical option for automation parallelization](https://techbeacon.com/app-dev-testing/parallelizing-test-automation-read-first).\nWhether those VMs are on premises or cloud-based largely depends on the cost and your company's ability to embrace the cloud.\n\nYou could also work with cloud partners, companies that host cloud-based execution environments\nfor testing and automation.\n\nAutoscaling is one way that teams can reduce the costs associated with running these concurrent jobs.\n[Autoscaling runners](/releases/2016/03/29/gitlab-runner-1-1-released/) split this work across multiple servers and spin up or down automatically to process queues – so developers don’t have to wait on builds and teams use as much capacity as needed.\nThis user [built out a CI testing pipeline using GitLab](https://medium.freecodecamp.org/4-steps-to-build-an-automated-testing-pipeline-with-gitlab-ci-24ccab95535e) that allowed for more effective bug catching, and more DevOps teams will be using these methods to automate their testing environments in years to come.\n\n## 3. 
AI and machine learning\n\nAt its core, machine learning is a pattern-recognition technology, [the main purpose of which is to make machines learn without being explicitly programmed](https://hackernoon.com/why-ai-ml-will-shake-software-testing-up-in-2019-b3f86a30bcfa).\nWhat makes this such an important trend in test automation is that it can make testing more predictive and reliable.\nWhile Selenium is still the standard for creating testing scripts, it requires a high level of programming skill to maintain.\nAutomation tools like Mabl, [TestCraft](https://www.testcraft.io/), Testim.io, and AutonomiQ are just some of the few incorporating AI and machine learning into test automation.\n\nDan Belcher, co-founder of testing tool company Mabl, and his team [developed an ML testing algorithm that can adapt to changes in frontend elements](https://techbeacon.com/app-dev-testing/how-ai-changing-test-automation-5-examples).\n\"Although Selenium is the most broadly used framework, the challenge with it is that it's pretty rigidly tied to the specific elements on the front end. Because of this, script flakiness can often arise when you make what seems like a pretty innocent change to a UI.\" he explains.\n\"One of the things that we did at the very beginning of creating Mabl was to develop a much smarter way of referring to frontend elements in our test automation so that those types of changes don't actually break your tests.\"\n\nAI and machine learning make it possible to go through millions of lines of code and identify patterns.\nBut what happens to the human testers? 
QA automation means that testers can devote more time to superior user experiences – the tasks that machines are _not_ always well-suited for.\nThe role of testers is now [ensuring that quality testing processes are being followed](https://www.qasymphony.com/blog/managing-testing-teams/), so it’s more about oversight than conducting actual tests.\nModern QA can be that bridge for beautiful user experiences that are intuitive and appealing.\nWith the volume of applications being deployed every day, having a great user experience is a way to stand out in a sea of apps.\n\n## These trends in test automation are just the tip of the iceberg\n\nThere is no shortage of exciting things happening: more focus on JavaScript testing, improvements in testing across devices, comprehensive testing dashboards, as well as Selenium-free options.\nThe testing automation landscape is full of new solutions, but none of them is viable in an outdated legacy environment.\n\nManual testing reduces application development speed and threatens code quality.\nThese two disadvantages are growth killers, especially in such a competitive development landscape.\nTest automation makes it possible for testers to use their skills where they add more business value: Creating great user experiences.\nLegacy applications can’t tap into all of these test automation capabilities because they aren’t supported.\nOrganizations forced to manually test their code are being left in the dust by those who automate.\n\nThe advantage of using a solution like GitLab is that we can incorporate a variety of continuous testing solutions.\nCustomers have integrated us with SaaS-based testing solutions or even their own homegrown Selenium grids.\nWe also integrate with JavaScript platforms like Cypress.io, and help teams create continuous integration pipelines.\n\nAre you ready to explore these trends in test automation but legacy applications are holding you back?\n\n[Just 
commit.](/blog/application-modernization-best-practices/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Mimi Thian](https://unsplash.com/photos/ZKBzlifgkgw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/%22developers%22?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[721,9,875],{"slug":4997,"featured":6,"template":700},"trends-in-test-automation","content:en-us:blog:trends-in-test-automation.yml","Trends In Test Automation","en-us/blog/trends-in-test-automation.yml","en-us/blog/trends-in-test-automation",{"_path":5003,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5004,"content":5010,"config":5016,"_id":5018,"_type":14,"title":5019,"_source":16,"_file":5020,"_stem":5021,"_extension":19},"/en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies",{"title":5005,"description":5006,"ogTitle":5005,"ogDescription":5006,"noIndex":6,"ogImage":5007,"ogUrl":5008,"ogSiteName":685,"ogType":686,"canonicalUrls":5008,"schema":5009},"Tutorial: Advanced use case for GitLab Pipeline Execution Policies","Learn how new GitLab Ultimate functionality can enforce a standardized pipeline across an organization for improved compliance.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098083/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098083312.jpg","https://about.gitlab.com/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Advanced use case for GitLab Pipeline Execution Policies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dan Rabinovitz\"}],\n        \"datePublished\": \"2025-01-22\",\n      
}",{"title":5005,"description":5006,"authors":5011,"heroImage":5007,"date":5013,"body":5014,"category":697,"tags":5015},[5012],"Dan Rabinovitz","2025-01-22","[Pipeline execution\npolicies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html)\nare a newer addition to the GitLab DevSecOps platform and a powerful\nmechanism to enforce CI/CD jobs across applicable projects. They enable\nplatform engineering or security teams to inject jobs into developers’ YAML\npipeline definition files, guaranteeing that certain CI/CD jobs will execute\nno matter what a developer defines in their \\`.gitlab-ci.yml\\` file. \n\n\nThis article will explain how to utilize pipeline execution policies to\ncreate guardrails around the stages or jobs that a developer can use in\ntheir pipeline definition. In regulated environments, this may be necessary\nto ensure developers adhere to a standard set of jobs or stages in their\nGitLab pipeline. Any job or stage that a developer adds to their pipeline\nthat does not adhere to a corporate standard will cause the pipeline to\nfail. \n\n\nOne example use case for pipeline execution policies is ensuring a security\nscanner job runs. Let’s say an organization has made an investment in a\nthird-party security scanner and they have a requirement that the external\nscan runs before any merge is made into the main branch. Without a pipeline\nexecution policy, a developer could easily skip this step by not including\nthe required code in their `.gitlab-ci.yml` file.  With a pipeline execution\npolicy in place, a security team can guarantee the external security\nscanning job executes regardless of how a developer defines their pipeline.\n\n\nTo use pipeline execution policies to enforce these restrictions requires\ntwo parts: a shell script to make calls to the GitLab API and the policy\nitself. 
This tutorial uses a bash script; if your runner uses a different\nscripting language, it is easy to adapt to other languages.\n\n\nHere is the example shell script I will use for this exercise:\n\n\n``` \n\n#!/bin/bash\n\n\necho \"Checking pipeline stages and jobs...\"\n\n\n# Pull the group access token from the environment variable\n\nGROUP_ACCESS_TOKEN=\"$PIPELINE_TOKEN\"\n\n\necho \"PROJECT_ID: $PROJECT_ID\"\n\necho \"PIPELINE_ID: $PIPELINE_ID\"\n\n\nif [ -z \"$GROUP_ACCESS_TOKEN\" ]; then  \n  echo \"GROUP_ACCESS_TOKEN (MR_GENERATOR) is not set\"\n  exit 1\nfi\n\n\nif [ -z \"$PROJECT_ID\" ]; then\n  echo \"PROJECT_ID is not set\"\n  exit 1\nfi\n\n\nif [ -z \"$PIPELINE_ID\" ]; then\n  echo \"PIPELINE_ID is not set\"\n  exit 1\nfi\n\n\n# Use the group access token for the API request\n\napi_url=\"$GITLAB_API_URL/projects/$PROJECT_ID/pipelines/$PIPELINE_ID/jobs\"\n\necho \"API URL: $api_url\"\n\n\n# Fetch pipeline jobs using the group access token\n\njobs=$(curl --silent --header \"PRIVATE-TOKEN: $GROUP_ACCESS_TOKEN\"\n\"$api_url\")\n\necho \"Fetched Jobs: $jobs\"\n\n\nif [[ \"$jobs\" == *\"404 Project Not Found\"* ]]; then\n  echo \"Failed to authenticate with GitLab API: Project not found\"\n  exit 1\nfi\n\n\n# Extract stages and jobs\n\npipeline_stages=$(echo \"$jobs\" | grep -o '\"stage\":\"[^\"]*\"' | cut -d '\"' -f 4\n| sort | uniq | tr '\\n' ',')\n\npipeline_jobs=$(echo \"$jobs\" | grep -o '\"name\":\"[^\"]*\"' | cut -d '\"' -f 4 |\nsort | uniq | tr '\\n' ',')\n\n\necho \"Pipeline Stages: $pipeline_stages\"  \n\necho \"Pipeline Jobs: $pipeline_jobs\"\n\n\n# Check if pipeline stages are approved\n\nfor stage in $(echo $pipeline_stages | tr ',' ' '); do \n  echo \"Checking stage: $stage\"\n  if ! [[ \",$APPROVED_STAGES,\" =~ \",$stage,\" ]]; then\n    echo \"Stage $stage is not approved.\"\n    exit 1\n  fi\ndone\n\n\n# Check if pipeline jobs are approved \n\nfor job in $(echo $pipeline_jobs | tr ',' ' '); do\n  echo \"Checking job: $job\"\n  if ! 
[[ \",$APPROVED_JOBS,\" =~ \",$job,\" ]]; then\n    echo \"Job $job is not approve\n```\n\n\nLet’s break this down a bit. \n\n\nThe first few lines of this code perform some sanity checks, ensuring that a\npipeline ID, project ID, and group access token exist.\n\n\n* A GitLab pipeline ID is a unique numerical identifier that GitLab\nautomatically assigns to each pipeline run.\n\n* A GitLab project ID is a unique numerical identifier assigned to each\nproject in GitLab.\n\n* A GitLab group access token is a token that authenticates and authorizes\naccess to resources at the group level in GitLab. This is in contrast to a\nGitLab personal access token (PAT), which is unique to each user.  \n\n\nThe bulk of the work comes from the [GitLab Projects\nAPI](https://docs.gitlab.com/ee/api/projects.html) call where the script\nrequests the jobs for the specified pipeline. Once you have job information\nfor the currently running pipeline, you can use a simple grep command to\nparse out stage and job names, and store them in variables for comparison.\nThe last portion of the script checks to see if pipeline stages and jobs are\non the approved list. Where do these parameters come from?\n\n\nThis is where [GitLab Pipeline Execution\nPolicies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html)\ncome into play. They enable injection of YAML code into a pipeline. How can\nwe leverage injected YAML to execute this shell script?  
Here’s a code\nsnippet showing how to do this.\n\n\n```\n\n## With this config, the goal is to create a pre-check job that evaluates\nthe pipeline and fails the job/pipeline if any checks do not pass\n\n\nvariables:\n  GITLAB_API_URL: \"https://gitlab.com/api/v4\"\n  PROJECT_ID: $CI_PROJECT_ID\n  PIPELINE_ID: $CI_PIPELINE_ID\n  APPROVED_STAGES: \".pipeline-policy-pre,pre_check,build,test,deploy\"\n  APPROVED_JOBS: \"pre_check,build_job,test_job,deploy_job\"\n\npre_check:\n  stage: .pipeline-policy-pre\n  script:\n    - curl -H \"PRIVATE-TOKEN:${REPO_ACCESS_TOKEN}\" --url \"https://\u003Cgitlab_URL>/api/v4/projects/\u003Cproject_id>/repository/files/check_settings.sh/raw\" -o pre-check.sh\n    - ls -l\n    - chmod +x pre-check.sh\n    - DEBUG_MODE=false ./pre-check.sh  # Set DEBUG_MODE to true or false\n  allow_failure: true\n```\n\n\nIn this YAML snippet, we set a few variables used in the shell script. Most\nimportantly, this is where approved stages and approved jobs are defined.\nAfter the `variables` section, we then add a new job to the\n`.pipeline-policy-pre` stage. This is a reserved stage for pipeline\nexecution policies and is guaranteed to execute before any stages defined in\na `.gitlab-ci.yml` file.  There is a corresponding `.pipeline-policy-post`\nstage as well, though we will not be using it in this scenario.  \n\n\nThe script portion of the job does the actual work. Here, we leverage a curl\ncommand to execute the shell script defined above. This example includes\nauthentication if it’s located in a private repository. However, if it’s\npublicly accessible, you can forgo this authentication. The last line\ncontrols whether or not the pipeline will fail. In this example, the\npipeline will continue. This is useful for testing – in practice, you would\nlikely set `allow_failure: false` to cause the pipeline to fail. 
This is\ndesired as the goal of this exercise is to not allow pipelines to continue\nexecution if a developer adds a rogue job or stage.\n\n\nTo utilize this YAML, save it to a `.yml` file in a repository of your\nchoice. We’ll see how to connect it to a policy shortly.\n\n\nNow, we have our script and our YAML to inject into a developer’s pipeline.\nNext, let’s see how to put this together using a pipeline execution policy.\n\n\nLike creating other policies in GitLab, start by creating a new Pipeline\nExecution Policy by navigating to **Secure > Policies** in the left hand\nnavigation menu. Then, choose **New Policy** at the top right, and select\n**Pipeline Execution Policy** from the policy creation options.  \n\n\nFor this exercise, you can leave the **Policy Scope** set to the default\noptions. In the **Actions** section, be sure to choose **Inject** and select\nthe project and file where you’ve saved your YAML code snippet. Click on\n**Update via Merge Request** at the very bottom to create an MR that you can\nthen merge into your project.\n\n\nIf this is your first security policy, clicking on **Merge** in the MR will\ncreate a [Security Policy\nProject](https://docs.gitlab.com/ee/user/application_security/policies/vulnerability_management_policy.html),\nwhich is a project to store all security policies. When implementing any\ntype of security policy in a production environment, [access to this project\nshould be restricted](https://docs.gitlab.com/ee/user/project/members/) so\ndevelopers cannot make changes to security policies. In fact, you may also\nwant to consider storing YAML code that’s used by pipeline execution\npolicies in this project to restrict access as well, though this is not a\nrequirement.  
\n\nExecuting a pipeline where this pipeline execution policy is enabled should\nresult in the following output when you attempt to add an invalid stage to\nthe project `.gitlab-ci.yml` file.\n\n\n![Output of attempting an invalid stage to project gitlab-ci.yml\nfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098102/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098102394.png)\n\n\nWhile this use case is very focused on one aspect of security and compliance\nin your organization, this opens the door to other use cases. For example,\nyou may want to make group-level variables accessible to every project\nwithin a group; this is possible with pipeline execution policies. Or, you\nmay want to create a golden pipeline and have developers add to it. The\npossibilities are endless. GitLab customers are finding new and exciting\nways to use this new functionality every day.\n\n\nIf you’re a GitLab Ultimate customer, try this out today and let us know how\nyou’re using pipeline execution policies. 
Not a GitLab Ultimate customer?\n[Sign up for a free\ntrial](https://about.gitlab.com/free-trial/devsecops/) to get started.\n\n\n## Read more\n\n- [How to integrate custom security scanners into\nGitLab](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/)\n\n- [Integrate external security scanners into your DevSecOps\nworkflow](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/)\n\n- [Why GitLab is deprecating compliance pipelines in favor of security\npolicies](https://about.gitlab.com/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies/)\n",[697,917,185,495,9,695],{"slug":5017,"featured":6,"template":700},"tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies","content:en-us:blog:tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies.yml","Tutorial Advanced Use Case For Gitlab Pipeline Execution Policies","en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies.yml","en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies",{"_path":5023,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5024,"content":5030,"config":5037,"_id":5039,"_type":14,"title":5040,"_source":16,"_file":5041,"_stem":5042,"_extension":19},"/en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"title":5025,"description":5026,"ogTitle":5025,"ogDescription":5026,"noIndex":6,"ogImage":5027,"ogUrl":5028,"ogSiteName":685,"ogType":686,"canonicalUrls":5028,"schema":5029},"Tutorial: Automate releases and release notes with GitLab","With the GitLab Changelog API, you can automate the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","https://about.gitlab.com/blog/tutorial-automated-release-and-release-notes-with-gitlab","\n       
                 {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Automate releases and release notes with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ben Ridley\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":5025,"description":5026,"authors":5031,"heroImage":5027,"date":5033,"body":5034,"category":693,"tags":5035,"updatedDate":5036},[5032],"Ben Ridley","2023-11-01","***2025 update** - The Changelog API has continued to evolve and now has some great new capabilities we don’t cover in this blog, such as the ability to provide custom changelogs with templated values from your commit history. [Discover more in the official Changelogs docs.](https://docs.gitlab.com/user/project/changelogs/)*\n\nWhen you develop software that users rely on, effective communication about changes with each release is essential. By keeping users informed about new features and any modifications or removals, you ensure they maximize the software's benefits and avoid encountering unpleasant surprises during upgrades.\n\nHistorically, creating release notes and maintaining a changelog has been a laborious task, requiring developers to monitor changes externally or release managers to sift through merge histories. 
With the GitLab Changelog API, you can use the rich history provided in our git repository to easily create release notes and maintain a changelog.\n\nIn this tutorial, we'll delve into automating releases with GitLab, covering the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.\n\n## Releases in GitLab\nFirst, let's explore how releases work in GitLab.\n\nIn GitLab, a release is a specific version of your code, identified by a git tag, that includes details about changes since the last release (and release notes) and any related artifacts built from that version of the code, such as Docker images, installation packages, and documentation.\n\nYou can create and track releases in GitLab using the UI by calling our Release API or by defining a special `release` job inside a CI pipeline. In this tutorial, we'll use the `release` job in a CI/CD pipeline, which allows us to extend the automation we're using in our pipelines for testing, code scanning, etc. to also perform automated releases.\n\nTo automate our releases, we first need to answer this question: Where are we going to get the information on changes made for our release notes and our changelog? The answer: Our git repository, which provides us with a rich history of development activity through commit messages and merge commit history. Let's see if we can leverage this rich history to automatically create our notes and changelogs.\n\n## Introducing commit trailers\n[Commit trailers](https://git-scm.com/docs/git-interpret-trailers) are structured entries in your git commits, created by adding simple `\u003CHEADER>:\u003CBODY>` format messages to the end of your commit. The `git` CLI tool can then parse and extract these for use in other systems. An example you might have already used is `git commit --sign-off` to sign off on a commit. This is implemented by adding a `Signed-off-by: \u003CYour Name>` trailer to the commit. 
We can add any arbitrary structured data here, which makes it a great place to store information that could be useful for our changelog.\n\nIn fact, if we use a `Changelog: \u003Cadded/changed/removed>` trailer in our commits, the GitLab Changelog API will parse these and use them to create a changelog for us automatically!\n\nLet's see this in action by making some changes to a real codebase and performing a release, and generating release notes and changelog entries.\n\n## Our example project\nFor the purposes of this blog, I'm using a simple Python web app repository. Let's pretend Version 1.0.0 of the application was just released and is the current version of the code. I've also created a 1.0.0 release in GitLab, which I did manually because we haven't created our automated release pipeline yet:\n\n![A screenshot of the GitLab UI showing a release for Version 1.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/1-0-release.png)\n\n## Making our changes\nWe're in rapid development mode, so we're going to be working on releasing Version 2.0.0 of our application today. As part of our 2.0.0 release, we're going to be adding a new feature to our app: A chatbot! And we're also going to be removing the quantum blockchain feature, because we only needed that for our first venture capital funding round. Also, we're going to be adding an automated release job to our CI/CD pipeline for our 2.0.0 release.\n\nFirst, let's remove unneeded features. I've created a merge request that contains the necessary removals. Importantly, we need to ensure we have a commit message that includes the `Changelog: removed` trailer. There's a few ways to do this, such as including it directly in a commit, or performing an interactive rebase and adding it using the CLI. 
But I think the easiest way in our situation is to leave it until the end and then use the `Edit commit message` button in GitLab to add the trailer to the merge commit like so:\n\n![A screenshot the GitLab UI showing a merge request removing unused features](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/remove-unused-features-mr.png)\n\nIf you use this method, you can also change the merge commit title to something more succinct. I've changed the title of my merge commit to 'Remove Unused Features', as this is what will appear in the changelog entry.\n\nNext, let's add some new functionality for the 2.0.0 release. Again, all we need to do is open another merge request that includes our new features and then edit the merge commit to include the `Changelog: added` trailer and edit the commit title to be more succinct:\n\n![A screenshot of the GitLab UI showing a merge request to add new functionality](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/add-chatbot-mr.png)\n\nNow we're pretty much ready to release 2.0.0. But we don't want to create our release manually this time. So before our release we're going to add some jobs to our `.gitlab-ci.yml` file that will perform the release for us automatically, and generate the respective release notes and changelog entries, when we tag our code with a new version like `2.0.0`.\n\n**Note:** If you want to enforce changelog trailers, consider using something like [Danger to perform automated checks for MR conventions](https://docs.gitlab.com/ee/development/dangerbot.html).\n\n## Building an automated release pipeline\nFor our pipeline to work, we need to create a project access token that will allow us to call GitLab's API to generate changelog entries. 
[Create a project access token with the API scope](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token), and then [store the token as a CI/CD variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) called `CI_API_TOKEN`. We'll reference this variable to authenticate to the API.\n\nNext, we're going to add two new jobs to our `gitlab-ci.yml` file:\n```yaml\nprepare_job:\n  stage: prepare\n  image: alpine:latest\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - apk add curl jq\n    - 'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\" | jq -r .notes > release_notes.md'\n  artifacts:\n    paths:\n    - release_notes.md\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  needs:\n    - job: prepare_job\n      artifacts: true\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - echo \"Creating release\"\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: release_notes.md\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_SHA'\n    assets:\n      links:\n        - name: 'Container Image $CI_COMMIT_TAG'\n          url: \"https://$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA\"\n```\n\nIn the above configuration, the `prepare_job` uses `curl` and `jq` to call the GitLab Changelog API endpoint and then passes this to our `release_job` to actually create the release. To break it down further:\n- We use the project access token created earlier to call the GitLab Changelog API, which performs the generation of the release notes and we store this as an artifact.\n- We're using the `$CI_COMMIT_TAG` variable as the version. 
For this to work, we need to be using semantic versioning for our tags (something like `2.0.0` for example), so you'll notice I've also restricted the release job using a `rules` section that checks for a semantic version tag.\n\t- Semantic versioning is required for the GitLab Changelog API to work. It uses this format to find the most recent release to compare to our current release.\n- We use the official `release-cli` image from GitLab. The release-cli is required to use the `release` keyword in a job.\n- We use the `release` keyword to create a release in GitLab. This is a special job keyword reserved for creating a release and populating the required fields.\n- We can pass a file as an argument to the `description` of the release. In our case, it's the file we generated in the `prepare_job`, which was passed to this job as an artifact.\n- We've also included our container image that is being built earlier in the pipeline as a release asset. You can attach any assets you'd like from your build process, such as binaries or documentation by providing a URL to wherever you've uploaded them earlier in the pipeline.\n\n## Performing an automated release\nWith this setup, all we need to do to perform a release is push a tag to our repository that follows our versioning scheme. You can simply push a tag using the CLI, this example uses GitLab's UI to create a tag on the main branch. Create a tag by selecting Code -> Tags -> New Tag on the sidebar:\n![A screenshot of the GitLab UI illustrating how to create a tag](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/create-2-tag.png)\n\nOn creation, our pipelines will start to execute. The GitLab Changelog API will automatically generate release notes for us as markdown, which contains all the changes between this release and the previous release. 
Here's the resulting markdown generated in our example:\n\n```md\n## 2.0.0 (2023-08-25)\n\n### added (1 change)\n\n- [Add ChatBot](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@0c3601a45af617c5481322bfce4d71db1f911b02) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!4))\n\n### removed (1 change)\n\n- [Remove Unused Features](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@463d453c5ae0f4fc611ea969e5442e3298bf0d8a) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!3))\n```\n\nAs you can see, GitLab has extracted the entries for our release notes automatically using our git commit trailers. In addition, it's helpfully provided links back to the merge request so readers can see more details and discussion around the changes.\n\nAnd now, our final release:\n![The GitLab release UI showing a release for version 2.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/2-0-release.png)\n\n## Creating the changelog\nNext, we want to update our changelog (which is basically a collated history of all your release notes). You can use a `POST` request to the changelog API endpoint we used earlier to do this.\n\nYou can do this as part of your release pipeline if you like, for example by adding this to the `script` section of your prepare job:\n```sh\n'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" -X POST \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\"\n```\n\n**Note that this will actually modify the repository.** It will create a commit to add the latest notes to a `CHANGELOG.md` file:\n![A screenshot of the repository which shows a commit updating the changelog file](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/changelog-api-commit.png)\n\nAnd we are done! 
By utilizing the rich history provided by `git` with some handy commit trailers, we can leverage GitLab's powerful API and CI/CD pipelines to automate our release process and generate release notes for us.\n\n> If you’d like to explore the project we used for this article, [you can find the project at this link](https://gitlab.com/gitlab-learn-labs/sample-projects/release-automation-demo).\n",[917,785,9,721,696,1105],"2025-06-05",{"slug":5038,"featured":6,"template":700},"tutorial-automated-release-and-release-notes-with-gitlab","content:en-us:blog:tutorial-automated-release-and-release-notes-with-gitlab.yml","Tutorial Automated Release And Release Notes With Gitlab","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab.yml","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"_path":5044,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5045,"content":5051,"config":5056,"_id":5058,"_type":14,"title":5059,"_source":16,"_file":5060,"_stem":5061,"_extension":19},"/en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"title":5046,"description":5047,"ogTitle":5046,"ogDescription":5047,"noIndex":6,"ogImage":5048,"ogUrl":5049,"ogSiteName":685,"ogType":686,"canonicalUrls":5049,"schema":5050},"Tutorial: How to set up your first GitLab CI/CD component","Use Python scripts in your GitLab CI/CD pipelines to improve usability. 
In this step-by-step guide, you'll learn how to get started building your own CI/CD component.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098410/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2826%29_3lH4gZFVIGCndksN6Rlg85_1750098409928.png","https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: How to set up your first GitLab CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-12\",\n      }",{"title":5046,"description":5047,"authors":5052,"heroImage":5048,"date":5053,"body":5054,"category":718,"tags":5055},[3088,1957],"2024-11-12","Do you use Python scripts in your GitLab CI pipelines? Do you want to create\npipelines at scale? This tutorial shows how to set up your first [GitLab\nCI/CD component](https://docs.gitlab.com/ee/ci/components/) to deploy Python\nscripts. \n\n\nA [CI/CD component is a reusable single pipeline configuration\nunit](https://about.gitlab.com/blog/introducing-ci-components/).\nUse components to create a small part of a larger pipeline, or even to\ncompose a complete pipeline configuration.\n\n\n# Prerequisites\n\n- Basic Python knowledge\n\n- Working knowledge of GitLab CI\n\n- 8 minutes\n\n\n## Python script \n\n\n* **[The demo Python\nscript](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/src/script.py?ref_type=heads)**\n\n\nThis Python script utilizes a library called\n[ArgParse](https://docs.python.org/3/library/argparse.html) . ArgParse\nallows you to pass variables to script through the command line. 
This script\ntakes in three arguments:\n\n[Python_container_image](https://docs.gitlab.com/ee/ci/yaml/#image): This is\nthe Python container image you wish to use.\n\n[Stage](https://docs.gitlab.com/ee/ci/yaml/#stage): This is the GitLab CI\nstage in which you job will run in. \n\nName: This is your name.\n\n\n```python\n\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='Python CICD Component\nBoilerplate')\n\nparser.add_argument('python_container_image', type=str,\nhelp='python:3.10-slim')\n\nparser.add_argument('stage', type=str, help='Build')\n\nparser.add_argument('persons_name', type=str, help='Noah')\n\nargs = parser.parse_args()\n\n\npython_container_image = args.python_container_image\n\nstage = args.stage\n\npersons_name = args.persons_name\n\n```\n\n\nThis will take in these three variables and print out simple statements:\n\n\n```python\n\nprint(\"You have chosen \" + python_container_image + \" as the container\nimage\")\n\nprint(\"You have chosen \" + stage + \" as the stage to run this job\")\n\nprint(\"Thank you \" + persons_name + \"! you are succesfully using GitLab CI\nwith a Python script.\")\n\n```\n\n\nTo test this script locally, you can call on the script by utilizing the\nfollowing command:\n\n\n```bash\n\npython3 src/script.py python_container_image stage name\n\n```\n\n\nModify this script accordingly if you’d like to add in your own arguments!\n\n\n## Template \n\n\n* **[Demo of\ntemplate](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/templates/template.yml?ref_type=heads)**\n\n\n**Note:** As long as the `gitlab-ci.yml` is placed in the\ntemplates/directory, the CI/CD component will know to pick it up. We named\nour template `templates.yml`, but any name would work for this YAML file.\n\n\nNow, getting into the fun part of CI/CD components, inputs! \n[Inputs](https://docs.gitlab.com/ee/ci/yaml/inputs.html) allow you to pass\nthrough variables into your pipeline. 
\n\n\n```yml\n\nspec:\n  inputs:\n    python_container_image:\n      default: python:3.10-slim\n      description: \"Define any python container image\"\n    stage:\n      default: build\n      description: \"Define the stage this job will run in\"\n    persons_name:\n      default: Noah\n      description: \"Put your name here\"\n```\n\nHere we have defined the three inputs that are our arguments in our Python\nscript. You can see for each input we have added in a default value – this\nwill be what the input is set to if not overridden. If we took out this\ndefault keyword the input would become mandatory when we use our component.\nAs it is written now, adding in these inputs when we use our component is\noptional due to our default values.\n\n\nWe can also set descriptions to ensure that other developers can understand\nwhat to input when they use our component. Descriptions are optional but\nthey provide self documentation within the code itself, which is always\nnice.\n\n\nAfter we set up our inputs, let’s write the rest of our component:\n\n\n```yml\n\ncomponent:\n  image: $[[ inputs.python_container_image ]]\n  stage: $[[ inputs.stage ]]\n  before_script:\n    - pip3 install -r src/requirements.txt\n  script: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\n\n\nTo use inputs in our component, we need to use the syntax `$[[\ninputs.$VARIABLE ]]`. 
In the above code, you can see that we use inputs to\ndefine our image and stage with  `$[[ inputs.python_container_image ]]`\nand   `$[[ inputs.stage ]] `.\n\n\n```\n\nscript: python3 src/script.py $[[ inputs.python_container_image ]] $[[\ninputs.stage ]] $[[ inputs.persons_name ]]\n\n```\n\nDiving into the script section, you can see we call upon our Python script..\nWe are able to pass our inputs in with the help of the ArgParse.\n\n\nNow that you have reviewed how the Python script works and the template has\nbeen set up, it is time to use the component!\n\n\n## Using the component \n\n\n* **[A demo of including the\ncomponent](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/.gitlab-ci.yml?ref_type=heads)\n\n\nIn order to utilize the CI/CD component we just created, we need to include\nit in the `.gitlab-ci.yml` file that is in the root of our directory. \n\n\n```\n\ninclude:\n  # include the component located in the current project from the current SHA\n  - component: $CI_SERVER_FQDN/$CI_PROJECT_PATH/template@$CI_COMMIT_SHA\n    inputs:\n      python_container_image: python:3.11-slim\n      stage: test\n      persons_name: Tanuki\n```\n\n\nOne way to include it is to call upon it locally in the current project from\nthe current `Commit SHA`. You can find other ways to [reference a component\nin our\ndocumentation](https://docs.gitlab.com/ee/ci/components/#use-a-component).\n\n\nTo override the defaults, we have passed in other inputs so we get the\ncorrect image, stage, and name for our job. \n\n\nTry and change the `persons_names` to your own and watch the pipeline run!\n\n\n![ci/cd component tutorial - pipeline\nrunning](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098419/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098418901.png)\n\n\nVoila! 
You have learned how to set up a basic CI/CD component utilizing a\nPython ArgParse script!
\n\n\n## Read more\n\n\n- [FAQ: GitLab CI/CD\nCatalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n\n- [Introducing CI/CD Steps, a programming language for DevSecOps\nautomation](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n\n- [A CI/CD component builder's\njourney](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n",[9,917,696,1127],{"slug":5057,"featured":91,"template":700},"tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","content:en-us:blog:tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","Tutorial How To Set Up Your First Gitlab Ci Cd Component","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"_path":5063,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5064,"content":5070,"config":5076,"_id":5078,"_type":14,"title":5079,"_source":16,"_file":5080,"_stem":5081,"_extension":19},"/en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems",{"title":5065,"description":5066,"ogTitle":5065,"ogDescription":5066,"noIndex":6,"ogImage":5067,"ogUrl":5068,"ogSiteName":685,"ogType":686,"canonicalUrls":5068,"schema":5069},"Tutorial: Integrate GitLab Merge Request approvals with external systems","Learn how to improve GitLab extensibility and integration with external applications in this demo. 
The result: a seamless integration that provides more control over merge requests.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676011/Blog/Hero%20Images/blog-image-template-1800x945.svg","https://about.gitlab.com/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Integrate GitLab Merge Request approvals with external systems\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"}],\n        \"datePublished\": \"2024-10-08\",\n      }",{"title":5065,"description":5066,"authors":5071,"heroImage":5067,"date":5073,"body":5074,"category":693,"tags":5075},[5072],"Samer Akkoub","2024-10-08","GitLab customers often ask how to connect merge requests to external applications, such as ServiceNow or custom-built applications, to control approvals for the merging of code into a target branch from these external systems. To address this need, GitLab offers [External Status Check](https://docs.gitlab.com/ee/user/project/merge_requests/status_checks.html), a powerful feature that allows the sending of API calls to external systems to request the status of an external requirement, providing seamless integration and control over your merge requests.\n\nIn this article, I'll demonstrate this feature by explaining how to deploy an application I developed. The application is designed to receive status check requests from GitLab Merge Requests, list them, and enable external users to approve/reject these requests without logging in to the GitLab console. As a result, GitLab platform architects will better understand GitLab extensibility and integration with external systems.\n\nThe provided sample application can:\n1. Receive API requests from merge requests.\n2. Store the requests in AlchemyDB running on the same instance.\n3. 
Show Approve/Reject buttons for each row to approve or reject the corresponding merge request status check.\n\n## How to deploy the status review demo application\n1. Import this [GitLab repo project](https://gitlab.com/sakkoub-publicgroup/external-approval-app) to your GitLab account.\n2. The project pipeline will deploy the application to a Kubernetes cluster. To achieve this, define a [GitLab Agent](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html) for Kubernetes in a separate project and include a path to the cloned project under the “[user_access](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html)” section in the agent configuration.\n3. Add a new environment variable `KUBE_CONTEXT`, with the value equal to the used agent path:name, similar to the following structure `path/to/agent/project:agent-name`.\n4. The status check application will be deployed to the `approval-app` namespace by default.\n5. Create the `approval-app` namespace in the target Kubernetes cluster.\n6. In the created namespace, add a secret named `gitlab-token` with the value set to the personal access token (PAT) of the user who will be approving the requests. The approval application will use this PAT to communicate back to the GitLab instance.\n7. Run the status check application pipeline on the main branch.\n8. Once deployed, the application will be exposed behind a load balancer. Use this command to grab the public IP address of the load balancer: `kubectl get services -n approval-app`.\n9. The application can then be accessed using this URL: http://EXTERNAL-IP/approval-apps/. Replace the `EXTERNAL-IP` with the value of the external IP address from the previous step. The resulting page should look like below (the table would be empty as we have not added any new merge requests yet).\n\n![Table showing IP address](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752507534/v0pgvobf09eh9yqxqzrk.png)\n\n## Configure status check in GitLab\n\n1. 
In the GitLab project where the external status check needs to be configured, from the left menu, navigate under settings **-\\> Merge Request** and scroll down to **Status checks**.\n2. Click on **Add status check**.\n3. Add a service name.\n4. For the API to check enter: `[http://EXTERNAL-IP[/approval-apps/status_check`. Replace the `EXTERNAL-IP` with the external IP address found in the previous steps.\n5. Leave the `Target Branch` to the default, or select branch if you want this check to be triggered only for merge requests against certain branches.\n6. Leave `HMAC Shared Secret` as it is and click **Add status check**.\n\n![How to configure status check](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752507426/jal2hw9ef3pydbetbp7p.png)\n\n## Test everything together\n\n1. In the project where you have configured the external check, create a new merge request from any branch targeting the main branch (assuming the main branch was selected when the external check was configured in the previous section).\n2. In the merge request details, look for the **Status checks** section and it should show `1 Pending`.\n3. Now, in a new tab, open the deployed external check application using this URL (replace `EXTERNAL-IP` with the value of the external IP address from the previous steps): `http://EXTERNAL-IP/approval-apps/`.\n4. A new entry should show in the list for the request external check from the merge request just created. Click on **Approve**.\n5. Switch back to the merge request's details screen and notice how the merge request is showing an approved status now.\n\n## Debugging tips\n\nUse the following notes to debug if something does not go as planned:\n\nIt is always helpful to view the logs for the external status check application. To do so: \n   1. Extract the name of the application pod using this command: `kubectl get pods -n approval-app`.\n   2. 
View the pod logs `kubectl logs [THE NAME OF THE POD] -n approval-app`.\n\nYou can SSH into the application pod and view the database (Alchemydb), which is used for the application. \n   1. `kubectl exec -it \\[POD-NAME\\] -n approval-app -- /bin/sh` \n   2. `cd instance`\n   3. `sqlite3 gitlab_status_checks.db` \n   4. To view the database tables, type `.tables`.\n   5. To describe the table structure, type `PRAGMA table_info('status_check');`.\n   6. To view all the records in the `status_check` table, type `select * from status_check`.\n\n> Discover more about [GitLab External Status Check](https://docs.gitlab.com/ee/user/project/merge_requests/status_checks.html) and how to gain more control over merge requests.\n",[917,693,695,9],{"slug":5077,"featured":6,"template":700},"tutorial-integrate-gitlab-merge-request-approvals-with-external-systems","content:en-us:blog:tutorial-integrate-gitlab-merge-request-approvals-with-external-systems.yml","Tutorial Integrate Gitlab Merge Request Approvals With External Systems","en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems.yml","en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems",{"_path":5083,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5084,"content":5089,"config":5094,"_id":5096,"_type":14,"title":5097,"_source":16,"_file":5098,"_stem":5099,"_extension":19},"/en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"title":5085,"description":5086,"ogTitle":5085,"ogDescription":5086,"noIndex":6,"ogImage":1339,"ogUrl":5087,"ogSiteName":685,"ogType":686,"canonicalUrls":5087,"schema":5088},"Ultimate guide to CI/CD: Fundamentals to advanced implementation","Learn how to modernize continuous integration/continuous deployment, including automating the development, delivery, and security of pipelines.","https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to CI/CD: Fundamentals to advanced implementation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2025-01-06\",\n      }",{"title":5085,"description":5086,"authors":5090,"heroImage":1339,"date":5091,"body":5092,"category":741,"tags":5093},[4973],"2025-01-06","Continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) has revolutionized how software teams create value for their users. Gone are the days of manual deployments and integration headaches — modern development demands automation, reliability, and speed.\n\nAt its core, CI/CD is about creating a seamless pipeline that takes code from a developer's environment all the way to production and incorporates feedback in real time. [CI](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) helps teams catch issues early — before they become costly problems — by ensuring that code changes are frequently merged into a shared repository, automatically tested, and validated. [CD](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) extends this by automating deployments, making releases predictable and stress-free.\n\nRather than relying on manual processes and complex toolchains for software development, teams can use a robust CI/CD pipeline to build, test, and deploy software. And AI can streamline the process even further, automatically engineering CI/CD pipelines for consistent quality, compliance, and security checks.\n\nThis guide explains modern CI/CD pipelines, from basic principles to best practices to advanced strategies. You'll also discover how leading organizations use CI/CD for impactful results. 
What you learn in this guide will help you scale your DevSecOps environment to develop and deliver software in an [agile](https://about.gitlab.com/topics/ci-cd/continuous-integration-agile/), automated, and efficient manner.\n\nWhat you'll learn:\n- [What is continuous integration?](#what-is-continuous-integration%3F)\n- [What is continuous delivery?](#what-is-continuous-delivery%3F)\n- [How source code management relates to CI/CD](#how-source-code-management-relates-to-cicd)\n- [The benefits of CI/CD in modern software development](#the-benefits-of-cicd-in-modern-software-development)\n  - [Key differences between CI/CD and traditional development](#key-differences-between-cicd-and-traditional-development)\n- [Understanding CI/CD fundamentals](#understanding-cicd-fundamentals)\n  - [What is a CI/CD pipeline?](#what-is-a-cicd-pipeline%3F)\n- [Best practices for CI/CD implementation and management](#best-practices-for-cicd-implementation-and-management)\n  - [CI best practices](#ci-best-practices)\n  - [CD best practices](#cd-best-practices)\n- [How to get started with CI/CD](#how-to-get-started-with-cicd)\n- [Security, compliance, and CI/CD](#security-compliance%2C-and-cicd)\n- [CI/CD and the cloud](#cicd-and-the-cloud)\n- [Advanced CI/CD](#advanced-cicd)\n  - [Reuse and automation in CI/CD](#reuse-and-automation-in-cicd)\n  - [Troubleshooting pipelines with AI](#troubleshooting-pipelines-with-ai)\n- [How to migrate to GitLab CI/CD](#how-to-migrate-to-gitlab-cicd)\n- [Lessons from leading organizations](#lessons-from-leading-organizations)\n- [CI/CD tutorials](#cicd-tutorials)\n\n## What is continuous integration?\n\n[Continuous integration](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) (CI) is the practice of integrating all your code changes into the main branch of a shared source code repository early and often, automatically testing changes when you commit or merge them, and automatically kicking off a build. 
With continuous integration, teams can identify and fix errors and security issues more easily and much earlier in the development process.\n\n## What is continuous delivery?\n[Continuous delivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) (CD) – sometimes called _continuous deployment_ – enables organizations to deploy their applications automatically, allowing more time for developers to focus on monitoring deployment status and assure success. With continuous delivery, DevSecOps teams set the criteria for code releases ahead of time and when those criteria are met and validated, the code is deployed into the production environment. This allows organizations to be more nimble and get new features into the hands of users faster. \n\n## How source code management relates to CI/CD\n\nSource code management ([SCM](https://about.gitlab.com/solutions/source-code-management/)) and CI/CD form the foundation of modern software development practices. SCM systems like [Git](https://about.gitlab.com/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality/) provide a centralized way to track changes, manage different versions of code, and facilitate collaboration among team members. When developers work on new features or bug fixes, they create branches from the main codebase, make their changes, and then [merge them through merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/). This branching strategy allows multiple developers to work simultaneously without interfering with each other's code, while maintaining a stable main branch that always contains production-ready code.\n\nCI/CD takes the code managed by SCM systems and automatically builds, tests, and validates it whenever changes are pushed. When a developer submits their code changes, the CI/CD system automatically retrieves the latest code, combines it with the existing codebase, and runs through a series of automated checks. 
These typically include compiling the code, running unit tests, performing static code analysis, and checking code coverage. If any of these steps fail, the team is immediately notified, allowing them to address issues before they impact other developers or make their way to production. This tight integration between source control and continuous integration creates a feedback loop that helps maintain code quality and prevents integration problems from accumulating.\n\n## The benefits of CI/CD in modern software development\n\n[CI/CD brings transformative benefits to modern software development](https://about.gitlab.com/blog/ten-reasons-why-your-business-needs-ci-cd/) by dramatically reducing the time and risk associated with delivering new features and fixes. The continuous feedback loop gives DevSecOps teams confidence their changes are automatically validated against the entire codebase. The result is higher quality software, faster delivery times, and more frequent releases that can quickly respond to user needs and market demands.\n\nPerhaps most importantly, CI/CD fosters a culture of collaboration and transparency within software development teams. When everyone can see the status of builds, tests, and deployments in real time, it becomes easier to identify and resolve bottlenecks in the delivery process. The automation provided by CI/CD also reduces the cognitive load on developers, freeing them to focus on writing code rather than managing manual deployment processes. This leads to improved developer satisfaction and productivity, while also reducing the risk traditionally associated with the entire software release process. Teams can experiment more freely knowing rapid code reviews are part of the process and they can quickly roll back changes if needed, which encourages innovation and continuous improvement.\n\n> Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform free.\n\n### Key differences between CI/CD and traditional development\n\nCI/CD differs from traditional software development in many ways, including:\n\n**Frequent code commits**\n\nDevelopers often work independently and infrequently upload their code to a main codebase, causing merge conflicts and other time-consuming issues. With CI/CD, developers push commits throughout the day, ensuring that conflicts are caught early and the codebase remains up to date.\n\n**Reduced risk**\n\nLengthy testing cycles and extensive pre-release planning are hallmarks of traditional software development. This is done to minimize risk but often hinders the ability to find and fix problems. Risk is managed in CI/CD by applying small, incremental changes that are closely monitored and easily reverted.\n\n**Automated and continuous testing**\n\nIn traditional software development, testing is done once development is complete. However, this causes problems, including delayed delivery and costly bug fixes. CI/CD supports automated testing that occurs continuously throughout development, sparked by each code commit. Developers also receive feedback they can take fast action on.\n\n**Automated, repeatable, and frequent deployments**\n\nWith CI/CD, deployments are automated processes that reduce the typical stress and effort associated with big software rollouts. The same deployment process can be repeated across environments, which saves time and reduces errors and inconsistencies.\n\n## Understanding CI/CD fundamentals\n\nCI/CD serves as a framework for building scalable, maintainable delivery processes, so it's critical for DevSecOps teams to firmly grasp its core concepts. A solid understanding of CI/CD principles enables teams to adapt strategies and practices as technology evolves, rather than being tied to legacy approaches. 
Here are some of the basics.\n\n### What is a CI/CD pipeline?\n\nA [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) is a series of steps, such as build, test, and deploy, that automate and streamline the software delivery process. [Each stage serves as a quality gate](https://about.gitlab.com/blog/guide-to-ci-cd-pipelines/), ensuring that only validated code moves forward. Early stages typically handle basic checks like compilation and unit testing, while later stages may include integration testing, performance testing, compliance testing, and staged deployments to various environments.\n\nThe pipeline can be configured to require manual approvals at critical points, such as before deploying to production, while automating routine tasks and providing quick feedback to developers about the health of their changes. This structured approach ensures consistency, reduces human error, and provides a clear audit trail of how code changes move from development to production. Modern pipelines are often implemented as code, allowing them to be version controlled, tested, and maintained just like application code.\n\nThese are other terms associated with CI/CD that are important to know:\n- **Commit:** a code change\n- **Job:** instructions a runner has to execute\n- **Runner:** an agent or server that executes each job individually that can spin up or down as needed\n- **Stages:** a keyword that defines certain job stages, such as \"build\" and \"deploy.\" Jobs of the same stage are executed in parallel. 
Pipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, at the root level of a project.\n\n![CI/CD pipeline diagram](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673928/Blog/Content%20Images/1690824533476.png)\n\n## Best practices for CI/CD implementation and management\n\nHow successful you are with CI/CD depends greatly on the [best practices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/) you implement. \n\n#### CI best practices\n\n* Commit early, commit often.\n* Optimize pipeline stages.\n* Make builds fast and simple.\n* Use failures to improve processes.\n* Make sure the test environment mirrors production.\n\n#### CD best practices\n\n* Start where you are – you can always iterate.\n* Understand the best continuous delivery is done with minimal tools.\n* Track what’s happening so issues and merge requests don't get out of hand.\n* Streamline user acceptance testing and staging with automation.\n* Manage the release pipeline through automation.\n* Implement monitoring for visibility and efficiency. \n\n> ### Bookmark this!\n>\n>Watch our [\"Intro to CI/CD\" webinar](https://www.youtube.com/watch?v=sQ7Nw3o0izc)!\n>\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sQ7Nw3o0izc?si=3HpNqIClrc2ncr7Y\" title=\"Intro to CI/CD webinar\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to get started with CI/CD\n\nGetting started with CI/CD begins with identifying a simple but representative project to serve as your pilot. Choose a straightforward application with basic testing requirements, as this allows you to focus on learning the pipeline mechanics rather than dealing with complex deployment scenarios. 
Begin by ensuring your code is in [version control](https://about.gitlab.com/topics/version-control/) and has some [basic automated tests](https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci/) — even a few unit tests will suffice. The goal is to [create a minimal pipeline](https://about.gitlab.com/blog/how-to-learn-ci-cd-fast/) that you can gradually enhance as your understanding grows.\n\nFor GitLab specifically, the process starts with creating a `.gitlab-ci.yml` file in your project's root directory. This YAML file defines your pipeline stages (basic ones like build, test, and deploy) and jobs. A simple pipeline might look like this: The build stage compiles your code and creates artifacts, the test stage runs your unit tests, and the deploy stage pushes your application to a staging environment. GitLab will automatically detect this file and start running your pipeline whenever changes are pushed to your repository. The platform provides [built-in runners](https://docs.gitlab.com/runner/) to execute your pipeline jobs, though you can also set up your own runners for more control.\n\nAs you become comfortable with the basics, gradually add more sophisticated elements to your pipeline. This might include adding code quality checks, [security scanning](https://docs.gitlab.com/ee/user/application_security/#security-scanning), or automated deployment to production. GitLab's DevSecOps platform includes features like [compliance management](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/), [deployment variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/), and manual approval gates that you can incorporate as your pipeline matures. Pay attention to pipeline execution time and look for opportunities to run jobs in parallel where possible. Remember to add proper error handling and notifications so team members are promptly alerted of any pipeline failures. 
Start documenting common issues and solutions as you encounter them — this will become invaluable as your team grows.\n\n> ### Want to learn more about getting started with CI/CD? Register for a [free CI/CD course on GitLab University](https://university.gitlab.com/courses/continuous-integration-and-delivery-ci-cd-with-gitlab).\n\n## Security, compliance, and CI/CD\n\nOne of the greatest advantages of CI/CD is the ability to embed security and compliance checks early and often in the software development lifecycle. In GitLab, teams can use the `.gitlab-ci.yml` configuration to automatically trigger security scans at multiple stages, from initial code commit to production deployment. The platform's container scanning, dependency scanning, and security scanning capabilities ([Dynamic Application Security Testing](https://docs.gitlab.com/ee/user/application_security/dast/) and [Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/)) can be configured to run automatically with each code change, checking for vulnerabilities, compliance violations, and security misconfigurations. The platform's API enables integration with [external security tools](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/), while the test coverage features ensure security tests meet required thresholds.\n\nGitLab's security test reports provide detailed information about findings, enabling quick remediation of security issues before they reach production. The Security Dashboard provides a centralized view of vulnerabilities across projects, while [security policies can be enforced](https://about.gitlab.com/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/) through merge request approvals and pipeline gates. 
In addition, GitLab provides multiple layers of secrets management to protect sensitive information throughout the CI/CD process, audit logs to track access to secrets, and role-based access control (RBAC) to ensure only authorized users can view or modify sensitive configuration data.\n\nGitLab also supports software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)) generation, providing a comprehensive inventory of all software components, dependencies, and licenses in an application and enabling teams to quickly identify and respond to vulnerabilities and comply with regulatory mandates.\n\n## CI/CD and the cloud\n\nGitLab's CI/CD platform provides robust integration with major cloud providers including [Amazon Web Services](https://about.gitlab.com/partners/technology-partners/aws/), [Google Cloud Platform](https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci/), and [Microsoft Azure](https://docs.gitlab.com/ee/install/azure/), enabling teams to automate their cloud deployments directly from their pipelines. Through GitLab's cloud integrations, teams can manage cloud resources, deploy applications, and monitor cloud services all within the GitLab interface. The platform's built-in cloud deployment templates and [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) features significantly reduce the complexity of cloud deployments, allowing teams to focus on application development rather than infrastructure management. For organizations that want to automate their IT   infrastructure using GitOps, GitLab has a [Flux CD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n\nGitLab's cloud capabilities extend beyond basic deployment automation. 
The platform's [Kubernetes integration](https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend/) enables teams to manage container orchestration across multiple cloud providers, while the [cloud native GitLab installation options](https://about.gitlab.com/topics/ci-cd/cloud-native-continuous-integration/) allow the platform itself to run in cloud environments. Through GitLab's cloud-native features, teams can implement auto-scaling runners that dynamically provision cloud resources for pipeline execution, optimizing costs and performance. The platform's integration with cloud provider security services ensures that security and compliance requirements are met throughout the deployment process.\n\nFor multi-cloud environments, GitLab provides consistent workflows and tooling regardless of the underlying cloud provider. Teams can use GitLab's environment management features to handle different cloud configurations across development, staging, and production environments. The platform's [infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) support, particularly its native integration with Terraform, enables teams to version control and automate their cloud infrastructure provisioning. GitLab's monitoring and observability features integrate with cloud provider metrics, providing comprehensive visibility into application and infrastructure health across cloud environments.\n\n## Advanced CI/CD \nCI/CD has evolved far beyond simple build and deploy pipelines. In advanced implementations, CI/CD involves sophisticated orchestration of automated testing, security scanning, infrastructure provisioning, AI, and more. 
Here are a few advanced CI/CD strategies that can help engineering teams scale their pipelines and troubleshoot issues even as architectural complexity grows.\n\n### Reuse and automation in CI/CD\n\nGitLab is transforming how development teams create and manage CI/CD pipelines with two major innovations: the [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) and [CI/CD steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/), a new programming language for DevSecOps automation currently in experimental phase. The CI/CD Catalog is a centralized platform where developers can discover, reuse, and contribute CI/CD components. Components function as reusable, single-purpose building blocks that simplify pipeline configuration — similar to Lego pieces for CI/CD workflows. Meanwhile, CI/CD steps support complex workflows by allowing developers to compose inputs and outputs for a CI/CD job. With the CI/CD Catalog and CI/CD steps, DevSecOps teams can easily standardize CI/CD and its components, simplifying the process of developing and maintaining CI/CD pipelines.\n\n> Learn more in our [CI/CD Catalog FAQ](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/) and [CI/CD steps documentation](https://docs.gitlab.com/ee/ci/steps/).\n\n### Troubleshooting pipelines with AI\n\nWhile CI/CD pipelines can and do break, troubleshooting the issue quickly can minimize the impact. GitLab Duo Root Cause Analysis, part of a suite of AI-powered features, removes the guesswork by [determining the root cause for a failed CI/CD pipeline](https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai/). When a pipeline fails, GitLab provides detailed job logs, error messages, and execution traces that show exactly where and why the failure occurred. 
Root Cause Analysis then uses AI to suggest a fix.\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to migrate to GitLab CI/CD\n\nMigrating to the DevSecOps platform and its built-in CI/CD involves a systematic approach of analyzing your existing pipeline configurations, dependencies, and deployment processes to map them to GitLab's equivalent features and syntax. Use these guides to help make the move.\n\n* [How to migrate from Bamboo to GitLab CI/CD](https://about.gitlab.com/blog/migrating-from-bamboo-to-gitlab-cicd/)\n* [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)\n* [GitHub to GitLab migration the easy way](https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy/)\n\n## Lessons from leading organizations\n\nThese leading organizations migrated to GitLab and are enjoying the myriad benefits of CI/CD. 
Read their stories.\n\n- [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/)\n- [Indeed](https://about.gitlab.com/blog/how-indeed-transformed-its-ci-platform-with-gitlab/)\n- [CARFAX](https://about.gitlab.com/customers/carfax/)\n- [HackerOne](https://about.gitlab.com/customers/hackerone/)\n- [Betstudios](https://about.gitlab.com/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium/)\n- [Thales and Carrefour](https://about.gitlab.com/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms/)\n\n## CI/CD tutorials\n\nBecome a CI/CD expert with these easy-to-follow tutorials.\n\n* [Basics of CI: How to run jobs sequentially, in parallel, or out of order](https://about.gitlab.com/blog/basics-of-gitlab-ci-updated/)\n* [How to set up your first GitLab CI/CD component](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n* [Building a GitLab CI/CD pipeline for a monorepo the easy way](https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way/)\n* [Using child pipelines to continuously deploy to five environments](https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments/)\n* [CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups](https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups/)\n* [Refactoring a CI/CD template to a CI/CD component](https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component/)\n* [Annotate container images with build provenance using Cosign in GitLab CI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd)\n\n> #### Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform.",[9,696,495,917,697,693],{"slug":5095,"featured":91,"template":700},"ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","content:en-us:blog:ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","Ultimate Guide To Ci Cd Fundamentals To Advanced Implementation","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"_path":5101,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5102,"content":5108,"config":5114,"_id":5116,"_type":14,"title":5117,"_source":16,"_file":5118,"_stem":5119,"_extension":19},"/en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"title":5103,"description":5104,"ogTitle":5103,"ogDescription":5104,"noIndex":6,"ogImage":5105,"ogUrl":5106,"ogSiteName":685,"ogType":686,"canonicalUrls":5106,"schema":5107},"Ultimate guide to migrating from AWS CodeCommit to GitLab","Learn how to migrate from AWS Services to GitLab and seamlessly integrate with the DevSecOps platform in this comprehensive tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097810/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2828%29_4mi0l4wzUa5VI4wtf8gInx_1750097810027.png","https://about.gitlab.com/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to migrating from AWS CodeCommit to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"},{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2024-08-26\",\n      
}",{"title":5103,"description":5104,"authors":5109,"heroImage":5105,"date":3285,"body":5112,"category":693,"tags":5113},[5110,780,5072,5111],"Tsukasa Komatsubara","Bart Zhang","On July 25, 2024, AWS made a significant announcement regarding its CodeCommit service. As detailed in their [official blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/), AWS has decided to close new customer access to CodeCommit. While existing customers can continue using the service, AWS will not introduce new features, focusing only on security, availability, and performance improvements.\n\nThis announcement has prompted development teams to consider migrating their repositories to alternative Git providers. In light of these changes, we've prepared this comprehensive guide to assist teams in migrating to GitLab and integrating with other AWS services.\n\n**Note:** For more details on AWS's official migration recommendations, please refer to [their blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/).\n\n## About this guide\n\nThis guide provides comprehensive information for development teams using GitLab who are considering integration with AWS services or planning to migrate from AWS-hosted Git repositories to GitLab.com. 
The guide is structured into four main sections:
Future-proofing: Enable teams to gradually migrate to GitLab CI/CD in parallel to existing CI.\n\nParallel migration is not required if it is already known that you want to cut over directly to GitLab.\n\n### Steps for migrating to GitLab.com\n\n#### Step 1: Get set up on GitLab.com\n\n- Check if your company already has a group in use on GitLab.com and whether they have single sign-on (SSO) set up – if they do, then you will want to use both.\n\n- If your company does not have a presence on GitLab.com, visit [GitLab.com](www.gitlab.com) and create a new account or log in to an existing one.\n- Create a new company namespace (a group at the root level of gitlab.com).\n- Pick a name that reflects your entire company (and is not already taken).\n\n#### Step 2: Import repository\nFor parallel migration: Use GitLab's pull mirroring feature to automatically sync changes from AWS-hosted repositories to GitLab.com.\n\n1. Navigate to the target group GitLab.com.\n2. In the upper right, click \"New project.\"\n3. On the \"Create new project\" page, click \"Import project.\"\n4. On the \"Import project\" page, click \"Repository by URL.\"\n5. Enter the URL of your AWS-hosted repository in the \"Git repository URL\" field.\n6. Underneath the Git repository URL field, check \"Mirror repository.\"\n7. Set up authentication: in the AWS CodeCommit console, select the clone URL for the repository you will migrate. If you plan on importing CodeCommit repositories into GitLab, you can use the HTTPS CodeCommit URL to clone the repository via GitLab Repository Mirroring. You will need to also provide your Git credentials from AWS for your identity and access management (IAM) user within GitLab. 
You can create Git credentials for AWS CodeCommit by following this [AWS guide](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html).\n\n![Clone URL](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/clone-url-screenshot__1__aHR0cHM6_1750097822121.png)\n\nThis setup will automatically pull changes from the AWS-hosted repository to GitLab.com every five minutes by default.\n\nFor more information, read our [repository mirroring documentation](https://docs.gitlab.com/ee/user/project/repository/mirror/).\n\n#### Step 3: Test and validate integrations\n\n1. CI/CD pipelines: Set up the `.gitlab-ci.yml` file in GitLab CI to replicate existing pipelines. You can read more about [planning a migration from other CI tools into GitLab CI/CD](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html).\n2. Issue tracking: Import project issues and test workflows.\n3. Code review: Set up the merge request process and test review workflows.\n\n#### Step 4: Gradual migration\n\n1. Start with small or non-critical projects to familiarize yourself with working on GitLab.com.\n2. Provide training for team members and allow time to adapt to new workflows.\n3. Gradually migrate more projects while ensuring integrations and workflows are problem-free.\n\nFor more information, see [Automating Migrations from CodeCommit to GitLab](https://gitlab.com/guided-explorations/aws/migrating-from-codecommit-to-gitlab/-/blob/main/migrating_codecommit_to_gitlab.md).\n\n#### Step 5: Complete migration\nOnce all tests and validations are complete and the team is comfortable with the new environment, plan for full migration. For each project:\n\n1. Set a migration date and notify all stakeholders.\n2. Perform final data synchronization.\n3. Remove mirroring settings from the GitLab project.\n4. 
Set AWS-hosted repositories to read-only and transition all development work to GitLab.com.\n\n#### Step 6: Assess adoption of new capabilities\n\nGitLab collaboration and workflow automation for developers is far richer than CodeCommit. It merits some time to learn what these capabilities are. The merge request process is especially rich compared to CodeCommit.\n\nAfter repositories are stable on GitLab, it is very easy to experiment with GitLab CI/CD in parallel to an existing solution. Teams can take time to perfect their GitLab CI/CD automation while production workflows remain unaffected.\n\nGitLab artifact management is also very capable with the Releases feature and many package registries.\n\n### Section 1: Summary\nBy adopting a parallel migration approach to GitLab, you can achieve a smooth transition while minimizing risks. This process allows teams to gradually adapt to the new environment and ensure all integrations and automations function correctly. Cutover migrations only omit a single setting checkbox if it is known that a parallel migration is not necessary.\n\n## Section 2: Integrating GitLab with AWS CodeBuild\n\nFor those wanting to build and test code from GitLab repositories using AWS CodeBuild, this comprehensive guide will help you set up an efficient CI pipeline.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodeBuild service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. 
Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connect setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822122.png)\n\n### Step 2: Create AWS CodeBuild project\n\n1. Click \"Create build project\" on the CodeBuild dashboard.\n2. Enter a project name and description.\n3. For source settings, select \"GitLab\" as the provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n\n![Add CodeBuild project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_3_add_codebuild_aHR0cHM6_1750097822123.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 2\nThis section explained in detail how to integrate GitLab repositories with AWS CodeBuild. This setup enables a continuous integration pipeline where code changes in GitLab are automatically built and tested using AWS CodeBuild.\n\n## Section 3: Integrating GitLab with AWS CodePipeline\n\nFor those looking to implement continuous delivery from GitLab repositories using AWS CodePipeline, this detailed guide will be helpful. The integration has become even easier now that GitLab is available as an AWS CodeStar Connections provider.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodePipeline service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. 
Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connections setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822125.png)\n\n### Step 2: Create AWS CodePipeline\n\n1. Click \"Create pipeline\" on the CodePipeline dashboard.\n2. Enter a pipeline name and click \"Next.\"\n3. Select \"GitLab\" as the source provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n5. Select the Trigger type: You can trigger CodePipeline pipeline execution based on either pull or push events against specific branches and file types within your repository.\n\n![Add source provider](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_provider_aHR0cHM6_1750097822127.png)\n\n![Add source configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_configured_aHR0cHM6_1750097822129.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 3\nThis section detailed how to integrate GitLab repositories with AWS CodePipeline. This setup enables a continuous delivery pipeline where code changes in GitLab are automatically deployed to your AWS environment.\n\n## Section 4: Migrating to GitLab\n\nIntegrating GitLab with AWS unlocks powerful capabilities for streamlining your development and deployment workflows and helps to solve your source code management woes. 
This integration can be achieved in several ways, each offering unique benefits:\n\n- Using AWS CodeStar Connections to link GitLab with AWS services enables a more cohesive workflow by allowing external Git repositories, like GitLab, to connect with various AWS services. This setup supports automated builds, deployments, and other essential actions directly from your GitLab repository, making your development process more integrated and streamlined.\n\n- Connecting GitLab with AWS CodePipeline via AWS CodeStar Connections takes automation to the next level by allowing you to create a full CI/CD pipeline. This approach integrates GitLab with AWS CodePipeline, enabling you to automate the entire process – from source control and builds to testing and deployment – using AWS services like CodeBuild and CodeDeploy. This ensures a robust, scalable, and efficient delivery process.\n\n![Chart of new technology and solutions for using GitLab and AWS together](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/Announcing_New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together_aHR0cHM6_1750097822130.png)\n\n1\\. Connecting GitLab with AWS services using AWS CodeStar Connections\n\nAWS CodeStar Connections is a service that allows you to connect external Git repositories (such as GitHub or Bitbucket) to AWS services. You can also connect GitLab to AWS services via CodeStar Connections. When using GitLab, you may need to set up a custom connection as an HTTP Git server.\nThe following AWS services can be connected to GitLab using this method:\n\n- **AWS Service Catalog**\n\nAWS Service Catalog helps organizations standardize and manage AWS resources. Integrating it with GitLab improves transparency in resource management and simplifies change tracking. 
Specifically, you can automate catalog updates based on GitLab commits, enhancing operational efficiency.\n\n- __AWS CodeBuild__\n\nAWS CodeBuild is a managed build service that compiles source code, runs tests, and produces deployable software packages. Integrating GitLab with CodeBuild allows automated build processes to start whenever code changes are pushed to GitLab. This ensures consistency in builds and facilitates easier collaboration and version control.\n\n- __AWS Glue Notebook Jobs__\n\nAWS Glue Notebook Jobs is a service that allows you to interactively develop and run data preparation and ETL (Extract, Transform, Load) tasks. Integrating GitLab with Glue Notebook Jobs enables version control for notebooks and ETL scripts, promotes collaboration among team members, and improves the quality management of data processing pipelines.\n\n- __AWS Proton__\n\nAWS Proton is a service that automates the development and deployment of microservices and serverless applications. By integrating GitLab with AWS Proton, you can manage infrastructure as code, automate deployments, and ensure consistent environment management, leading to more efficient development processes.\n\nAs AWS CodeStar Connections supports more services, connecting GitLab with additional AWS services will become easier. It's advisable to regularly check for new services that support CodeStar Connections.\n\n2. Connecting CodePipeline with GitLab via AWS CodeStar Connections (including CodeDeploy)\n\nAWS CodePipeline is a continuous delivery service that automates the release process for software. To connect GitLab with CodePipeline, you need to use AWS CodeStar Connections. 
This setup allows you to designate a GitLab repository as the source and automate the entire CI/CD pipeline.\nThe primary actions supported by CodePipeline include:\n- **Source control:** AWS CodeCommit, GitHub, Bitbucket, GitLab\n- **Build and test:** AWS CodeBuild, Jenkins\n- **Deploy:** AWS CodeDeploy, Elastic Beanstalk, ECS, S3\n- **Approval:** Manual approval\n- **Infrastructure management:** AWS CloudFormation\n- **Serverless:** AWS Lambda\n- **Testing:** AWS Device Farm\n- **Custom Actions:** AWS Step Functions\n\nBy integrating GitLab with CodePipeline, you can automatically trigger the pipeline whenever code changes are pushed to GitLab, allowing a consistent process from build to deployment. Additionally, combining this with GitLab's version control capabilities makes it easier to track deployment history and states, leading to more flexible and reliable software delivery.\n\n## What you've learned\nThis guide has provided comprehensive information on migrating to and integrating GitLab with AWS. 
Through the four main topics, we've covered:\n- Parallel migration to GitLab: How to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n- Integration with AWS CodeBuild: Steps to set up a powerful CI environment integrated with GitLab repositories.\n- Integration with AWS CodePipeline: How to build efficient continuous delivery pipelines using GitLab repositories.\n- Downstream integrations for CodePipeline and CodeStar Connections: Leveraging GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nAs every organization's code hosting and integration implementation strategy is unique, this tutorial may be used as a starting point for your own GitLab + AWS integration and implementation strategy.\n\n## Additional resources\n\nFor more detailed information and advanced configurations, refer to the following resources:\n\n- [GitLab documentation](https://docs.gitlab.com/)\n- [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html)\n- [AWS CodePipeline User Guide](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/)\n- [Integrate with AWS](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html)\n\nIf you have questions or need support, please contact [GitLab Support](https://about.gitlab.com/support/) or AWS Support. 
We hope this comprehensive guide helps you in your AWS-GitLab integration journey.",[9,1126,495,917,1127,693,232],{"slug":5115,"featured":91,"template":700},"ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","content:en-us:blog:ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","Ultimate Guide To Migrating From Aws Codecommit To Gitlab","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"_path":5121,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5122,"content":5127,"config":5131,"_id":5133,"_type":14,"title":5134,"_source":16,"_file":5135,"_stem":5136,"_extension":19},"/en-us/blog/updating-the-os-version-of-saas-runners-on-linux",{"title":5123,"description":5124,"ogTitle":5123,"ogDescription":5124,"noIndex":6,"ogImage":3806,"ogUrl":5125,"ogSiteName":685,"ogType":686,"canonicalUrls":5125,"schema":5126},"Upgrading the operating system version of our SaaS runners on Linux","With GitLab 17.0, we are updating the operating system version of our SaaS runners on Linux. 
Learn what will change and how to mitigate potential incompatibilities.","https://about.gitlab.com/blog/updating-the-os-version-of-saas-runners-on-linux","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Upgrading the operating system version of our SaaS runners on Linux\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabriel Engel\"}],\n        \"datePublished\": \"2023-10-04\",\n      }",{"title":5123,"description":5124,"authors":5128,"heroImage":3806,"date":4270,"body":5129,"category":1062,"tags":5130},[2192],"GitLab 17.0, due in May 2024, comes with an upgrade to the\ncontainer-optimized operating system\n([COS](https://cloud.google.com/container-optimized-os/docs)) of the\nephemeral VMs used to execute jobs for [SaaS runners on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html).\nThat COS upgrade includes a Docker Engine upgrade from Version 19.03.15 to\nVersion 23.0.5, which introduces a known compatibility issue.  \n\n\n## Who will be impacted by the change?\n\nThe fleet of [SaaS runners on\nLinux](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\nwith the tags `saas-linux-*-amd64` will receive an update. 
This change does\nnot affect the SaaS Runners on macOS and Windows.\n\n\n### Using Docker-in-Docker\n\nUsing [Docker-in-Docker based\njobs](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-in-docker)\n\nwith a version prior to `20.10` on a host that uses Docker Engine 20.10 or\nnewer, you'll get the following error:\n\n\n```plaintext\n\ncgroups: cgroup mountpoint does not exist: unknown\n\n```\n\n\n### Using Kaniko\n\n[Using Kaniko to build container\nimages](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html) is another\nimpacted use case.\n\nKaniko versions older than `v1.9.0`, are unable to detect the container\nruntime and fail with the error:\n\n\n```plaintext\n\nkaniko should only be run inside of a container, run with the --force flag\nif you are sure you want to continue\n\n```\n\n\n## How to fix the issue\n\nTo fix this, simply update the version of Docker-in-Docker or Kaniko images\nused in your job.\n\nIn general, we strongly advise the regular testing and updating to the\nlatest possible version, and referencing it explicitly in the job\ndefinition.\n\nThis will prevent your jobs from randomly failing when image updates are\npublished.\n\n\n### Using Docker-in-Docker\n\nUpdate your jobs to use `docker:dind` in Version 20.10 or newer, such as:\n\n\n```yaml\n\njob:\n  services:\n  - docker:24.0.5-dind\n  image: docker:24.0.5\n  script:\n  - ...\n```\n\n\n### Using Kaniko\n\nUpdate your jobs to use `gcr.io/kaniko-project/executor` in Version `v1.9.0`\nor newer, such as:\n\n\n```yaml\n\njob:\n  image: gcr.io/kaniko-project/executor:v1.14.0\n  script:\n  - ...\n```\n\n\n## Read more\n\n- [What are SaaS runners?](https://docs.gitlab.com/ee/ci/runners/)\n\n- [SaaS runners on Linux\ndocumentation](https://docs.gitlab.com/ee/ci/runners/saas/linux_saas_runner.html)\n\n- [Building Docker images with Docker\ndocumentation](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html)\n\n- [Building Docker images with 
Kaniko\ndocumentation](https://docs.gitlab.com/ee/ci/docker/using_kaniko.html)\n",[9,693,1062],{"slug":5132,"featured":6,"template":700},"updating-the-os-version-of-saas-runners-on-linux","content:en-us:blog:updating-the-os-version-of-saas-runners-on-linux.yml","Updating The Os Version Of Saas Runners On Linux","en-us/blog/updating-the-os-version-of-saas-runners-on-linux.yml","en-us/blog/updating-the-os-version-of-saas-runners-on-linux",{"_path":5138,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5139,"content":5145,"config":5150,"_id":5152,"_type":14,"title":5153,"_source":16,"_file":5154,"_stem":5155,"_extension":19},"/en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"title":5140,"description":5141,"ogTitle":5140,"ogDescription":5141,"noIndex":6,"ogImage":5142,"ogUrl":5143,"ogSiteName":685,"ogType":686,"canonicalUrls":5143,"schema":5144},"Use GitLab Duo to build and deploy a simple Quarkus-native project","This tutorial shows how a Java application is compiled to machine code and deployed to a Kubernetes cluster using a CI/CD pipeline. 
See how AI makes the process faster and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo to build and deploy a simple Quarkus-native project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-10-17\",\n      }",{"title":5140,"description":5141,"authors":5146,"heroImage":5142,"date":5147,"body":5148,"category":849,"tags":5149},[1506],"2024-10-17","In [“How to automate software delivery using Quarkus and\nGitLab,”](https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab/)\nyou learned how to develop and deploy a simple Quarkus-JVM application to a\nKubernetes cluster using [GitLab Auto\nDevOps](https://docs.gitlab.com/ee/topics/autodevops/). Now, you'll learn\nhow to use Quarkus-native to compile a Java application to machine code and\ndeploy it to a Kubernetes cluster using a CI/CD pipeline. Follow our journey\nfrom development to deployment leveraging [GitLab\nDuo](https://about.gitlab.com/gitlab-duo/) as our AI companion, including\nthe specific prompts we used.\n\n\n## What is Quarkus?\n\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java,\nis an open source, Kubernetes-native Java stack tailored to OpenJDK HotSpot\nand GraalVM. The Quarkus project recently moved to the [Commonhaus\nFoundation](https://www.commonhaus.org/), a nonprofit organization dedicated\nto the sustainability of open source libraries and frameworks that provides\na balanced approach to governance and support.\n\n\n## Prerequisites\n\n\nThis tutorial assumes:\n\n\n- You have a running Kubernetes cluster, e.g. 
GKE.\n\n- You have access to the Kubernetes cluster from your local laptop via the\n`kubectl` command.\n\n- The cluster is connected to your GitLab project.\n\n- You have [Maven (Version 3.9.6 or later)](https://maven.apache.org/)\ninstalled on your local laptop.\n\n- You have Visual Studio Code installed on your local laptop.\n\n\nIf you’d like to set up a Kubernetes cluster connected to your GitLab\nproject, you can follow the instructions in this\n[tutorial](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/),\nup to but not including the “Creating an instance of MySQL database in your\ncluster via Flux” section (you do not need a database for this tutorial).\n\n\nYou will also need to install an nginx ingress in your Kubernetes cluster.\nHere are two ways to do this:\n\n1. You can follow the instructions in [“Creating and importing\nprojects”](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/#creating-and-importing-projects),\nup to the creation of the variable `KUBE_INGRESS_BASE_DOMAIN`.\n\n2. Or, just create an ingress in your Kubernetes cluster by following the\ninstructions in our [Auto DevOps with GKE\ndocumentation](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html#install-ingress).\n\n\n**NOTE:** For this article, we used the first method above to install an\ningress and cert-manager in the Kubernetes cluster.\n\n\n## Creating necessary project files using GitLab Duo Chat\n\n\nWe started our endeavor from VS Code and an empty project called\n`quarkus-native`, which we had previously created in GitLab and had already\ncloned to our local laptop.\n\n\n1. 
We opened GitLab Duo Chat, within VS Code, and entered the following\nprompt:\n\n\n**_Create a “Hello World” Quarkus application that can be natively\ncompiled_**\n\n\n![hello-world-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/hello-world-prompt.png)\n\n\nChat replied with the prerequisites and process of what to do to create a\nsimple “Hello World” Quarkus application. At this point, we were after the\ngenerated body of the class `HelloResource.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport javax.ws.rs.GET;\n\nimport javax.ws.rs.Path;\n\nimport javax.ws.rs.Produces;\n\nimport javax.ws.rs.core.MediaType;\n\n\n@Path(\"/hello\")\n\npublic class HelloResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"Hello World\";\n    }\n}\n\n```\n\n\n2. We knew that for Quarkus we needed a special Dockerfile, named\n`Dockerfile.native`, as part of the creation of the image for the\napplication executable. We then entered the following prompt in Chat:\n\n\n**_Create the Dockerfile.native file for the sample application above_**\n\n\n![create-dockerfile-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675941/Blog/Content%20Images/create-dockerfile-prompt.png)\n\n\nChat replied with a very thorough answer about this file, its contents, and\nthe containerization process. At this point, we were just after the\ngenerated body of the file `Dockerfile.native`:\n\n\n```\n\n####\n\n# This Dockerfile is used in order to build a container that runs the\nQuarkus application in native (no JVM) mode\n\n####\n\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.6\n\nWORKDIR /work/\n\nRUN chown 1001 /work \\\n    && chmod \"g+rwX\" /work \\\n    && chown 1001:root /work\nCOPY --chown=1001:root target/*-runner /work/application\n\n\nEXPOSE 8080\n\nUSER 1001\n\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]\n\n```\n\n\n3. 
Next, we asked Chat to generate the `pom.xml` for this application by\nentering the following prompt:\n\n\n**_Create the pom.xml file for the sample application above_**\n\n\n![create-pom-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pom-prompt.png)\n\n\nChat provided the content of the `pom.xml` file, an explanation of its\ncontents, and how to use it. At this point, we were after the generated body\nof the file `pom.xml`:\n\n\n```\n\n\u003C?xml version=\"1.0\"?>\n\n\u003Cproject xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0\nhttps://maven.apache.org/xsd/maven-4.0.0.xsd\"\nxmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  \u003CmodelVersion>4.0.0\u003C/modelVersion>\n  \u003CgroupId>org.acme\u003C/groupId>\n  \u003CartifactId>hello-world-quarkus\u003C/artifactId>\n  \u003Cversion>1.0.0-SNAPSHOT\u003C/version>\n  \u003Cproperties>\n    \u003Ccompiler-plugin.version>3.10.1\u003C/compiler-plugin.version>\n    \u003Cmaven.compiler.release>11\u003C/maven.compiler.release>\n    \u003Cproject.build.sourceEncoding>UTF-8\u003C/project.build.sourceEncoding>\n    \u003Cproject.reporting.outputEncoding>UTF-8\u003C/project.reporting.outputEncoding>\n    \u003Cquarkus.platform.artifact-id>quarkus-bom\u003C/quarkus.platform.artifact-id>\n    \u003Cquarkus.platform.group-id>io.quarkus.platform\u003C/quarkus.platform.group-id>\n    \u003Cquarkus.platform.version>2.16.5.Final\u003C/quarkus.platform.version>\n    \u003CskipITs>true\u003C/skipITs>\n    \u003Csurefire-plugin.version>3.0.0-M7\u003C/surefire-plugin.version>\n  \u003C/properties>\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>${quarkus.platform.artifact-id}\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        
\u003Ctype>pom\u003C/type>\n        \u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  \u003Cdependencies>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-arc\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-resteasy\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-junit5\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.rest-assured\u003C/groupId>\n      \u003CartifactId>rest-assured\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n  \u003C/dependencies>\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>quarkus-maven-plugin\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Cextensions>true\u003C/extensions>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>build\u003C/goal>\n              \u003Cgoal>generate-code\u003C/goal>\n              \u003Cgoal>generate-code-tests\u003C/goal>\n            \u003C/goals>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-compiler-plugin\u003C/artifactId>\n        \u003Cversion>${compiler-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CcompilerArgs>\n            \u003Carg>-parameters\u003C/arg>\n          \u003C/compilerArgs>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-surefire-plugin\u003C/artifactId>\n        
\u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CsystemPropertyVariables>\n            \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n            \u003Cmaven.home>${maven.home}\u003C/maven.home>\n          \u003C/systemPropertyVariables>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-failsafe-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>integration-test\u003C/goal>\n              \u003Cgoal>verify\u003C/goal>\n            \u003C/goals>\n            \u003Cconfiguration>\n              \u003CsystemPropertyVariables>\n                \u003Cnative.image.path>${project.build.directory}/${project.build.finalName}-runner\u003C/native.image.path>\n                \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n                \u003Cmaven.home>${maven.home}\u003C/maven.home>\n              \u003C/systemPropertyVariables>\n            \u003C/configuration>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  \u003Cprofiles>\n    \u003Cprofile>\n      \u003Cid>native\u003C/id>\n      \u003Cactivation>\n        \u003Cproperty>\n          \u003Cname>native\u003C/name>\n        \u003C/property>\n      \u003C/activation>\n      \u003Cproperties>\n        \u003CskipITs>false\u003C/skipITs>\n        \u003Cquarkus.package.type>native\u003C/quarkus.package.type>\n      \u003C/properties>\n    \u003C/profile>\n  \u003C/profiles>\n\u003C/project>\n\n```\n\n\n4. 
We then asked Chat to generate the Quarkus test file for this application\nby entering the following prompt:\n\n\n**_Create the content for the Quarkus test for the application above_**\n\n\n![create-quarkus-test-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-quarkus-test-prompt.png)\n\n\nChat returned the body for the Quarkus test, how to use it, and the body of\nthe Java class for the test needed for its native compilation. At this\npoint, we were after the generated bodies of the file\n`HelloResourceTest.java` and `HelloResourceIT.java`.\n\n\nHere is the generated body for `HelloResourceTest.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusTest;\n\nimport org.junit.jupiter.api.Test;\n\n\nimport static io.restassured.RestAssured.given;\n\nimport static org.hamcrest.CoreMatchers.is;\n\n\n@QuarkusTest\n\npublic class HelloResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"Hello World\"));\n    }\n\n}\n\n```\n\n\nHere is the generated body for `HelloResourceIT.java`:\n\n\n```\n\npackage org.acme;\n\n\nimport io.quarkus.test.junit.QuarkusIntegrationTest;\n\n\n@QuarkusIntegrationTest\n\npublic class HelloResourceIT extends HelloResourceTest {\n    // Execute the same tests but in native mode.\n}\n\n```\n\n\n5. We needed to know how to organize these files in the GitLab project, so\nwe asked about the directory structure for all these files by entering the\nfollowing prompt in Chat:\n\n\n**_Give me the entire directory structure for this project including the\nlocation of each file, e.g. 
pom.xml, Dockerfile.native,\napplication.properties, HelloResource.java, HelloResourceTest.java, and the\nlocation of the target directory_**\n\n\n![create-dir-struct-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-dir-struct-prompt.png)\n\n\nChat replied with a detailed diagram about the entire directory structure\nfor the project and where all these files should be located as well as a\ndescription of the purpose of each of them. It even mentioned that the\ndirectory `target/` and its contents should not be version controlled since\nit was generated by the build process. Another interesting aspect of the\nreply was the existence of a file called `resources/application.properties`\nin the directory structure.\n\n\n![dir-struct-chat-response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/dir-struct-chat-response.png)\n\n\nWith all this information in our hands, we were ready to start creating\nthese files in our GitLab project.\n\n\n## Populating our project with the generated content for each file\n\n\nWe created each of the following files in their corresponding location and\ntheir generated content as provided by Chat:\n\n\n- `src/main/java/org/acme/HelloResource.java`\n\n- `resources/application.properties`\n\n- `src/test/java/org/acme/HelloResourceTest.java`\n\n- `src/test/java/org/acme/HelloResourceIT.java`\n\n- `pom.xml`\n\n- `Dockerfile.native`\n\n\n**NOTE:** We considered using GitLab Auto Deploy for this endeavor but later\nrealized that it would not be a supported option. We are mentioning this\nbecause in the video at the end of this tutorial, you will see that we asked\nChat: `How to set the service internalPort to 8080 for auto deploy`. Then we\ncreated a file named `.gitlab/auto-deploy-values.yaml` with the generated\ncontent from Chat. 
The creation of this file is not necessary for this\ntutorial.\n\n\nBefore we started tackling the pipeline to build, containerize, and deploy\nthe application to our Kubernetes cluster, we decided to generate the\nexecutable locally on our Mac and test the application locally.\n\n\n## Testing the application locally\n\n\nHere is the process we went through to test the application on our local\nmachine.\n\n\n1. To build the application on the local Mac laptop, from a Terminal window,\nwe entered the following command:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\n![first-build](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/first-build.png)\n\n\nThe native compilation failed with the error message:\n\n\n`Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System\nPATH. Install it using ‘gu install native-image’`\n\n\n2. So, we used our trusty GitLab Duo Chat again and asked it the following:\n\n\n**_The command “mvn clean package -Pnative” is failing with error\n“java.lang.RuntimeException: Cannot find the ‘native-image’ in the\nGRAALVM_HOME, JAVA_HOME and System PATH. Install it using gu install\nnative-image”. I’m using a MacOS Sonoma. How do I fix this error on my\nMac?_**\n\n\n![how-to-fix-build-failure-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-fix-build-failure-prompt.png)\n\n\nChat replied with a detailed set of steps on how to install the necessary\nsoftware and set the appropriate environment variables.\n\n\n3. 
We copied and pasted the following commands from the Chat window to a\nTerminal window:\n\n\n```\n\nbrew install –cask graalvm/tap/graalvm-ce-java17\n\nexport JAVA_HOME=/Library/Java/JavaVIrtualMachines/graalvm-ce-java17-22.3.1\n\nexport GRAALVM_HOME=${JAVA_HOME}\n\nexport PATH=${GRAALVM_HOME}/bin:$PATH\n\nxattr -r -d com.apple.quarantine ${GRAALVM_HOME}/../..\n\ngu install native-image\n\n```\n\n\nThe commands above installed the community edition of GraalVM Version 22.3.1\nthat supported Java 17. We noticed, during the brew install, that the\nversion of the GraalVM being installed was `java17-22.3.1`, so we had to\nupdate the pasted value for `JAVA_HOME` from `graalvm-ce-java17-22.3.0` to\n`graalvm-ce-java17-22.3.1`.\n\n\nWe also had to run the `xattr` command to get the GraalVM, which we had\ndownloaded and installed on our Mac, out of quarantine so that it could run\nlocally. Lastly, we installed the GraalVM native-image.\n\n\n4. At this point, we again, from a Terminal window, entered the following\ncommand to build the application on the local Mac laptop:\n\n\n```\n\nmvn clean package -Pnative\n\n```\n\n\nThis time the compilation was successful and an executable was generated in\nthe `target` directory.\n\n\n![successful-local-compilation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/successful-local-compilation.png)\n\n\n5. We ran the executable by entering the following commands from a Terminal\nwindow:\n\n\n```\n\ncd target\n\n./quarkus-native-1.0.0-SNAPSHOT-runner “-Dquarkus.http.host=0.0.0.0”\n\n```\n\n\n![executable-local-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/executable-local-run.png)\n\n\n6. 
With the application running, we opened a browser window, and in the URL\nfield, we entered:\n\n\n```\n\nhttp://localhost:8080/hello\n\n```\n\n\n![app-running-locally](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-locally.png)\n\n\nThe application returned the string `Hello World`, which was displayed in\nthe browser window.\n\n\nAt this point, we committed and pushed all the changes to our GitLab project\nand started working on creating a CI/CD pipeline that would build and deploy\nthe application to a Kubernetes cluster running on the cloud.\n\n\nBut before continuing, we remembered to add, commit, and push a `.gitignore`\nfile to our project that included the path `target/`, since this was the\ndirectory where the executable would be created and we didn’t need to keep\nit - or its contents - under version control.\n\n\n## Creating the pipeline with GitLab Duo Chat\n\n\nNow that we had already successfully tested the application locally on our\nMac, we needed to create the CI/CD pipeline that would compile the\napplication, containerize it, and deploy it to our Kubernetes cluster. We\nwanted to keep the pipeline simple, brief, and have a single environment in\nwhich to deploy it. To this end, the pipeline would not tackle multiple\nenvironments or feature branches, for example.\n\n\n1. To avoid manually creating a pipeline from scratch, we decided to once\nagain leverage Chat. We entered the following prompt\n\n\n**_Create a .gitlab-ci.yml file with 3 stages: build, containerize, and\ndeploy. Each of these stages should have a single job with the same name.\nThe build job should compile the application natively using the -Pnative\nmaven option and the builder image for mandrel jdk-22 for java17 and store\nthe application executable and its Dockerfile as artifacts. The containerize\njob should use docker to build and push the image to the built-in container\nregistry. 
The deploy job should rollout the containerized application, named\nquarkus-app, to the production environment in the Kubernetes cluster by\ncreating a deployment resource, a service resource and an ingress rule with\nhost ${KUBE_INGRESS_BASE_DOMAIN} and /hello prefix. The deploy job should\nalso include the url for the production environment. When setting the\nKubernetes context, use ${KUBE_CONTEXT}, and for application namespace, use\n${KUBE_NAMESPACE}_**\n\n\n![create-pipeline-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pipeline-prompt.png)\n\n\nThe prompt above provided some points that we thought would be useful and\nnecessary for a good outcome. The requirements listed by the prompt were:\n\n\n- It specified that the pipeline needed to have three stages: build,\ncontainerize, and deploy.\n\n- Each stage would have a single job.\n\n- The build job should natively compile (not build) the application using\nthe -Pnative option.\n\n- The build job should use the open source software `mandrel` and use the\nversion that would support jdk-22 for Java 17. Remember that these were the\nversions that we tested on our local machine.\n\n- The build job would also need to pass the resulting executable and the\nproject Dockerfile to the next job, i.e. 
containerize, in the pipeline.\n\n- The containerize job should build a docker image with the executable using\nthe Dockerfile, both of which were passed as artifacts.\n\n- The containerize job should push the image to the GitLab container\nregistry.\n\n- The deploy job should use the image from the GitLab container registry,\nname it `quarkus-app`, and deploy it to the Kubernetes cluster using\ndeployment, service and ingress rule resources.\n\n- The deploy job should use the environment variables\n`${KUBE_INGRESS_BASE_DOMAIN}`, `${KUBE_CONTEXT}`, and `${KUBE_NAMESPACE}`,\nwhich had been defined as group/project variables, accordingly.\n\n\nChat responded with the following `.gitlab-ci.yml` example:\n\n\n```\n\n# Define the stages for the pipeline\n\nstages:\n  - build\n  - containerize\n  - deploy\n\n# Variables section to use GitLab CI/CD variables\n\nvariables:\n  MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  MAVEN_CLI_OPTS: \"--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true\"\n  CONTAINER_IMAGE: ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}\n\n# Cache configuration for Maven dependencies\n\ncache:\n  paths:\n    - .m2/repository\n\n# Build stage: Compile the application natively\n\nbuild:\n  stage: build\n  image:\n    name: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n      - \"\"\n  script:\n    - ./mvnw ${MAVEN_CLI_OPTS} clean package -Pnative\n  artifacts:\n    paths:\n      - target/*-runner\n      - src/main/docker/Dockerfile.native-micro\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Containerize stage: Build and push Docker image\n\ncontainerize:\n  stage: containerize\n  image: docker:20.10.16\n  services:\n    - docker:20.10.16-dind\n  variables:\n    DOCKER_TLS_CERTDIR: \"/certs\"\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -f src/main/docker/Dockerfile.native-micro -t 
${CONTAINER_IMAGE} .\n    - docker push ${CONTAINER_IMAGE}\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Deploy stage: Deploy to Kubernetes\n\ndeploy:\n  stage: deploy\n  image: \n    name: bitnami/kubectl:latest\n    entrypoint: ['']\n  script:\n    - kubectl config use-context ${KUBE_CONTEXT}\n    - kubectl config set-context --current --namespace=${KUBE_NAMESPACE}\n    - |\n      cat \u003C\u003CEOF | kubectl apply -f -\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: quarkus-app\n      spec:\n        replicas: 1\n        selector:\n          matchLabels:\n            app: quarkus-app\n        template:\n          metadata:\n            labels:\n              app: quarkus-app\n          spec:\n            containers:\n            - name: quarkus-app\n              image: ${CONTAINER_IMAGE}\n              ports:\n              - containerPort: 8080\n      ---\n      apiVersion: v1\n      kind: Service\n      metadata:\n        name: quarkus-app\n      spec:\n        selector:\n          app: quarkus-app\n        ports:\n        - port: 80\n          targetPort: 8080\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: quarkus-app\n      spec:\n        rules:\n        - host: ${KUBE_INGRESS_BASE_DOMAIN}\n          http:\n            paths:\n            - path: /hello\n              pathType: Prefix\n              backend:\n                service:\n                  name: quarkus-app\n                  port: \n                    number: 80\n      EOF\n    - echo \"Application deployed to https://${KUBE_INGRESS_BASE_DOMAIN}/hello\"\n  # This job runs only on the main branch\n  only:\n      - main\n```\n\n\n2. There were some things we needed to adjust in the sample `.gitlab-ci.yml`\nfile above before we could commit it to our `main` branch. 
These are the\nupdates we made to the file:\n\n\n- We deleted all occurrences of `only: -main` because we wanted to keep of\npipeline definition file simple and with no branch-related rules.\n\n- We fixed the name of the file `Dockerfile.native-micro` to\n`Dockerfile.native`.\n\n\n3. At this point, we wanted to ensure that the deployment would be to the\n`production` environment so we asked Chat the following prompt:\n\n\n**_What is the syntax to specify an environment with its url in a\npipeline?_**\n\n\n![how-to-add-env-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-env-prompt.png)\n\n\nThe response from Chat included an example of how to do this so we used this\ninformation to add the following environment block to our pipeline:\n\n\n```\n  environment:\n       name: production\n       url: http://${KUBE_INGRESS_BASE_DOMAIN}/hello\n```\n\n\n4. The example provided by Chat includes a URL that started with `https` and\nwe modified that to `http` since we didn’t really need a secure connection\nfor this simple application.\n\n\n5. Lastly, we noticed that in the `build` job, there was a script `mvnw`\nthat we didn’t have in our project. So, we asked Chat the following:\n\n\n**_How can I get the mvnw script for Quarkus?_**\n\n\n![how-to-add-mvnw-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-mvnw-prompt.png)\n\n\nChat responded with the command to execute to bootstrap and create this\nscript. We executed this command from a Terminal window:\n\n\n```\n\nmvn wrapper:wrapper\n\n```\n\n\nWe were now ready to commit all of our changes to the `main` branch and have\nthe pipeline executed. However, on our first attempt, our first pipeline\nfailed at the build job.\n\n\n## Troubleshooting using GitLab Duo Root Cause Analysis\n\n\nOur first attempt at running our brand-new pipeline failed. 
So, we took\nadvantage of [GitLab Duo Root Cause\nAnalysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/),\nwhich looks at the job logs and provides a thorough natural language\nexplanation (with examples) of the root cause of the problem and, most\nimportantly, how to fix it.\n\n\n![build-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/build-job-troubleshooting.png)\n\n\nRoot Cause Analysis recommended we look at the compatibility of the command\nthat was trying to be executed with the image of mandrel used in the build\njob. We were not using any command with the image so we concluded that it\nmust have been the predefined `entrypoint` for the image itself. We needed\nto override this so we asked Chat the following:\n\n\n**_How do I override the entrypoint of an image using gitlab keywords?_**\n\n\n![how-to-override-entrypoint-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-override-entrypoint-prompt.png)\n\n\nChat replied with some use case examples of overriding an image entry point.\nWe used that information to update the build job image definition:\n\n\n```\n\nbuild:\n    stage: build\n    image: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n        - “”\n```\n\n\nWe committed our changes to the `main` branch, which launched a new instance\nof the pipeline. This time the build job executed successfully but the\npipeline failed at the `containerize` job.\n\n\n## Running a successful pipeline\n\n\nBefore drilling down into the log of the failed `containerize` job, we\ndecided to drill into the log of the successfully completed build job first.\nEverything looked good in the log of the build job with the exception of\nthis warning message at the very end of it:\n\n\n```\n\nWARNING: src/main/docker/Dockerfile.native: no matching files. 
Ensure that\nthe artifact path is relative to the working directory …\n\n``` \n\n\nWe took notice of this warning and then headed to the log of the failed\n`containerize` job. In it, we saw that the `docker build` command had failed\ndue to a non-existent Dockerfile. We ran Root Cause Analysis on the job and\namong its suggested fixes was for us to verify that the project structure\nmatched the path of the specified `Dockerfile.native` file.\n\n\n![containerize-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/containerize-job-troubleshooting.png)\n\n\nThis information confirmed our suspicion of the misplaced\n`Dockerfile.native` file. Instead of being at the directory\n`src/main/docker` as specified in the pipeline, it was located at the root\ndirectory of the project.\n\n\nSo, we went back to our project and updated every occurrence of the location\nof this file in our `.gitlab-ci.yml` file. We modified the two locations\nwhere this happened, one in the `build` job and one in the `containerize`\njob, as follows:\n\n\n```\n\nsrc/main/docker/Dockerfile.native\n\n```\n\n\nto\n\n\n```\n\nDockerfile.native\n\n```\n\n\nWe committed our updates to the `main` branch and this time our entire\npipeline executed successfully!\n\n\n![pipeline-successful-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/pipeline-successful-run.png)\n\n\nOur last step was to check the running application in the `production`\nenvironment in our Kubernetes cluster.\n\n\n## Accessing the deployed application running in cluster\n\n\nOnce the pipeline ran successfully to completion, we drilled in the log file\nfor the `deploy` job. Remember, this job printed the URL of the application\nat the end of its execution. 
We scrolled down to the bottom of the log and\nclicked on the `https` application link, which opened a browser window\nwarning us that the connection was not private (we disabled `https` for the\nenvironment URL but forgot it for this string). We proceeded past the\nbrowser warning and then the string \"Hello World\" was displaced in the\nbrowser window indicating that the application was up and running in the\nKubernetes cluster.\n\n\nFinally, to double-check our production deployment URL, we headed to the\nproject **Operate > Environments** window, and clicked on the \"Open\" button\nfor it, which immediately opened a browser window with the \"Hello World\"\nmessage.\n\n\n![app-running-on-k8s](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-on-k8s.png)\n\n\n## Try it \n\n\nWe created, compiled, built, and deployed a simple Quarkus application to a\nKubernetes cluster using [GitLab Duo](https://about.gitlab.com/gitlab-duo/).\nThis approach allowed us to be more efficient and productive in all the\ntasks that we performed and it helped us streamline our DevSecOps processes.\nWe have shown only a small portion of how GitLab Duo's AI-powered\ncapabilities can help you, namely Chat and Root Cause Analysis. 
There’s so\nmuch more you can leverage in GitLab Duo to help you create better software\nfaster and more securely.\n\n\nWatch this whole use case in action:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDpycxz3RPY?si=HHZrFt1O_8XoLATf\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nAll the project assets we used are available\n[here](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/quarkusn/quarkus-native).\n\n\n> [Try GitLab Duo for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\nand get started on exciting projects like this.\n",[851,917,696,896,695,693,9],{"slug":5151,"featured":91,"template":700},"use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","content:en-us:blog:use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","Use Gitlab Duo To Build And Deploy A Simple Quarkus Native Project","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"_path":5157,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5158,"content":5164,"config":5169,"_id":5171,"_type":14,"title":5172,"_source":16,"_file":5173,"_stem":5174,"_extension":19},"/en-us/blog/use-inputs-in-includable-files",{"title":5159,"description":5160,"ogTitle":5159,"ogDescription":5160,"noIndex":6,"ogImage":5161,"ogUrl":5162,"ogSiteName":685,"ogType":686,"canonicalUrls":5162,"schema":5163},"Define input parameters to includable CI/CD configuration files","This is the first milestone of the long-term roadmap of the CI/CD Components Catalog roadmap.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679170/Blog/Hero%20Images/migration-data.jpg","https://about.gitlab.com/blog/use-inputs-in-includable-files","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Define input parameters to includable CI/CD configuration files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-05-08\",\n      }",{"title":5159,"description":5160,"authors":5165,"heroImage":5161,"date":5166,"body":5167,"category":718,"tags":5168},[1835],"2023-05-08","In GitLab 15.11, we introduced an exciting new feature that allows users to\n[define input parameters for includable configuration\nfiles](/releases/2023/04/22/gitlab-15-11-released/#define-inputs-for-included-cicd-configuration).\nWith the ability to use input parameters in [CI\ntemplates](https://docs.gitlab.com/ee/development/cicd/templates.html), you\ncan replace any keyword in the template with a parameter, including stage,\nscript, or job name. For example, you can add a prefix to all of the jobs to\nbetter isolate them from the pipeline into which you are including the\nconfiguration.\n\n\nThese input parameters can be declared as mandatory or optional for each\nconfiguration file, reducing the need for global variables and making your\nCI/CD templates more robust and isolated. The input parameters are scoped to\nthe included configuration only, which means they have no impact on the rest\nof the pipeline. This allows you to declare and enforce constraints, for\nexample by enforcing mandatory inputs for templates.\n\n\nThis development is the first milestone of the long-term roadmap of the\n[CI/CD Components\nCatalog](https://gitlab.com/groups/gitlab-org/-/epics/7462), a new feature\nthat will allow users to search and reuse single-purpose CI/CD configuration\nunits with specific parameters for their use case. 
If you want to learn more\nabout this exciting new development, you can read our [blog post about our\nCI templates feature](/blog/how-to-build-reusable-ci-templates/).\n\n\nIn this technical blog post, we will provide step-by-step instructions on\nhow to define CI/CD templates with input parameters and how to use them when\nincluding templates.\n\n\n## Step 1: Create a template YAML document\n\nThe first step is to create a template YAML document that describes what\ninput arguments can be used with the template. The second part of the\ntemplate is the definition of the jobs that may include references to values\nusing the interpolation format `$[[ inputs.input-name ]]`. You should use\nthree dash lines between the two parts.\n\n\nHere is an example of a deploy-template.yml:\n\n\n```yaml\n\nspec:\n  inputs:\n    website:\n    environment:\n      default: test\n---\n\ndeploy:\n  stage: deploy\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\n\nIn this template, we have defined two input parameters: website and\nenvironment. The environment parameter has a default value. In the content\nsection, we define a job that interpolates the input arguments.\n\n\n## Step 2: Include the template in the CI configuration\n\nIn your main CI configuration file `.gitlab-ci.yml`, include the template\nand add input parameters using the `inputs` keyword.\n\n\nHere is an example of including the `deploy-template.yml` with input\nparameters:\n\n\n```yaml\n\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      website: my-website.example.com\n```\n\n\nIn this example, we included a local template in our project. 
Note: You can\nuse `inputs` with the other [include\ntypes](https://docs.gitlab.com/ee/ci/yaml/index.html#include) such as\n`include:project`, `include:template`, `include:remote`.\n\n\nIn the below example, we use inputs to add a prefix to jobs name, and make\nthe stage dynamic as well.\n\n\n```yaml\n\nspec:\n  inputs:\n    website:\n    environment:\n      default: staging\n    stage:\n      default: test\n    job_prefix:\n      default: \"\"\n---\n\n\"$[[ inputs.job_prefix ]]deploy\":\n  stage: $[[ inputs.stage ]]\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\n\nThen we can include it from the `.gitlab-ci.yml` with the input parameters:\n\n\n```\n\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      stage: deploy\n      website: http://example.com\n      environment: production\n      job_prefix: \"my-app-\"\n```\n\n\nYou can [fork](https://gitlab.com/tech-marketing/ci-interpolation-example)\nthis project, which uses the above examples:\n\n\n- [Dynamic\njob](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/dynamic-job.yml)\n\n- [Dynamic\nscript](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/deploy-template.yml)\n\n- [Main CI\nconfiguration](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/.gitlab-ci.yml)\n\n\nFor more information, please use our [online\ndocumentation](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs).\n\n\nThat's it! You have successfully created CI templates that accept inputs and\nused them in a pipeline configuration. 
By using templates with inputs, you\ncan simplify pipeline configuration and make templates more modular and\nreusable.\n\n\nThank you to [Fabio Pitino](https://gitlab.com/fabiopitino) and [Grzegorz\nBizon](https://gitlab.com/grzesiek) for their content reviews.\n",[917,721,9],{"slug":5170,"featured":6,"template":700},"use-inputs-in-includable-files","content:en-us:blog:use-inputs-in-includable-files.yml","Use Inputs In Includable Files","en-us/blog/use-inputs-in-includable-files.yml","en-us/blog/use-inputs-in-includable-files",{"_path":5176,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5177,"content":5182,"config":5189,"_id":5191,"_type":14,"title":5192,"_source":16,"_file":5193,"_stem":5194,"_extension":19},"/en-us/blog/use-multiproject-pipelines-with-gitlab-cicd",{"title":5178,"description":5179,"ogTitle":5178,"ogDescription":5179,"noIndex":6,"ogImage":2088,"ogUrl":5180,"ogSiteName":685,"ogType":686,"canonicalUrls":5180,"schema":5181},"Multi-project pipelines for streamlined repository workflow","You can connect CI/CD pipelines and artifacts for multiple related projects to make managing interactions easy.","https://about.gitlab.com/blog/use-multiproject-pipelines-with-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to streamline interactions between multiple repositories with multi-project pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fabio Busatto\"}],\n        \"datePublished\": \"2018-10-31\",\n      }",{"title":5183,"description":5179,"authors":5184,"heroImage":2088,"date":5186,"body":5187,"category":718,"tags":5188},"How to streamline interactions between multiple repositories with multi-project pipelines",[5185],"Fabio Busatto","2018-10-31","\nModern software products consist of different components and\n[microservices](/topics/microservices/) that work together, relying on many libraries and dependencies:\nbecause of this, 
many projects cannot be limited to one single repository.\n\nWith [GitLab 9.3](/releases/2017/06/22/gitlab-9-3-released/#multi-project-pipeline-graphs)\nwe released [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html)\nto make interactions between different repositories easy to manage. Here's a look at how they work.\n\nNote: these features are available in GitLab [Premium](/pricing/#self-managed), [Gold subscriptions](/pricing/#gitlab-com),\nand public projects on GitLab.com only.\n{: .note}\n\n## What are multi-project pipelines, and how do they help?\n\nMulti-project pipelines span multiple repositories, creating a\nconnection between them. But what is technically possible to achieve, and how?\n\n### Start an external pipeline directly from your job\n\nThe most important feature is the ability to trigger an external pipeline\nfrom `gitlab-ci.yml`: using the special variable `$CI_JOB_TOKEN` and the\n[Pipeline Trigger API](https://docs.gitlab.com/ee/api/pipeline_triggers.html)\nyou can start another pipeline in a different project directly from your job,\nwithout setting any additional authentication token or configuration in the\ntarget project: GitLab automatically detects the user running the caller\npipeline, and run the target one with the same privileges.\n\nThe [`$CI_JOB_TOKEN` variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nis automatically created when a job starts: it is associated with the user\nthat is running the job, so GitLab is able to enforce permissions when\ndealing with other related projects. 
It is also very limited in capabilities,\nand it is automatically destroyed as soon as the job ends, to prevent abuses.\n\n### Easily view related pipelines\n\nAnother very useful feature is the ability to see how projects are linked\ntogether directly in the [pipeline graph](https://docs.gitlab.com/ee/ci/pipelines/index.html#pipeline-graphs):\nupstream and downstream stages are rendered as squared boxes and connected\nto the main flow. They give you the status of the related pipelines and you\ncan easily jump to them by clicking the boxes. This feature is also available\nin the pipeline mini-graph that is shown in the Merge Request Widget (this\nfeature was released with [GitLab 9.4](/releases/2017/07/22/gitlab-9-4-released/#mini-graph-for-multi-project-pipelines)).\n\n![Multi-project pipeline graph](https://about.gitlab.com/images/blogimages/multi_project_pipeline_graph.png){: .shadow.center}\n *\u003Csmall>See how upstream and downstream pipelines are shown on both sides of the graph\u003C/small>*\n\n### Download artifacts from another project\n\nYou can also use the `$CI_JOB_TOKEN` variable with the Jobs API in order to\n[download artifacts](https://docs.gitlab.com/ee/api/jobs.html#get-job-artifacts)\nfrom another project. This is very helpful if one of the related pipelines\ncreates a dependency that you need (this has been possible since\n[GitLab 9.5](/releases/2017/08/22/gitlab-9-5-released/#cijobtoken-variable-for-artifacts-api)).\n\n## Why do we need multi-project pipelines?\n\nLet's see how multi-project pipelines could be very useful when dealing\nwith real-life projects.\n\n### Automatically test changes across all connected components\n\nA common development pattern is to have an API provider, a web\nfrontend, and some additional services (bulk data processing, email management,\netc). 
Each of these components has its own life in a different repository,\nbut they are strictly connected: a change in one of them should trigger\nbuilds and integration tests in all the related projects in order to check\nthat the changes are not introducing unintended behaviors. Linking those\nprojects with multi-project pipelines automates this task, and users\nwill receive notifications in case of failures.\n\n### Automatically trigger downstream pipelines for packaging\n\nAnother common scenario where multi-project pipelines can be used to simplify\nthe development workflow is packaging and releasing software: every time a\nchange is pushed to the stable branch, a downstream pipeline for the repository\nthat is responsible for packaging the application is triggered automatically.\nThis pipeline can easily fetch the latest artifacts from all the repositories\nthat contain the components of the application and create a Docker image or a\npackage that can be then published and distributed.\n\n## Example application\n\nYou can find [an example application here](https://gitlab.com/gitlab-examples/multi-project-pipelines/).\nIt consists of a Maven package and a command line app that uses it as a dependency.\n\nThe package is built and deployed to the [GitLab Maven Repository](https://docs.gitlab.com/ee/user/packages/maven_repository/index.html),\nthen it triggers a multi-project pipeline to update the entire application.\n\nYou can look at the [upstream](https://gitlab.com/gitlab-examples/multi-project-pipelines/simple-maven-dep/pipelines/33011429)\nand [downstream](https://gitlab.com/gitlab-examples/multi-project-pipelines/simple-maven-app/pipelines/33012000)\npipelines to see how the two projects interact to keep everything up to date.\n\n## Conclusion\n\nMulti-project pipelines are very helpful when dealing with big applications\nthat are not fully contained in a single repository. 
Existing features allow\nusers to connect them together and automate processes without complex setups.\n\nWe want to continue iterating on multi-project pipelines, and everyone is\ninvited to give feedback on this feature and suggest how we can make it even more\npowerful in the future.\n\n[Cover image](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Gerrie van der Walt on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9],{"slug":5190,"featured":6,"template":700},"use-multiproject-pipelines-with-gitlab-cicd","content:en-us:blog:use-multiproject-pipelines-with-gitlab-cicd.yml","Use Multiproject Pipelines With Gitlab Cicd","en-us/blog/use-multiproject-pipelines-with-gitlab-cicd.yml","en-us/blog/use-multiproject-pipelines-with-gitlab-cicd",{"_path":5196,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5197,"content":5203,"config":5211,"_id":5213,"_type":14,"title":5214,"_source":16,"_file":5215,"_stem":5216,"_extension":19},"/en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"ogTitle":5198,"schema":5199,"ogImage":5200,"ogDescription":5201,"ogSiteName":685,"noIndex":6,"ogType":686,"ogUrl":5202,"title":5198,"canonicalUrls":5202,"description":5201},"Build enterprise-grade IaC pipelines with GitLab DevSecOps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab and Ansible to create infrastructure as code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brad Downey\"},{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-01\",\n      }","https://res.cloudinary.com/about-gitlab-com/image/upload/v1746211002/zlet4rmfg2z0j6lg16mc.png","Learn how to transform infrastructure automation into scalable, secure pipelines using GitLab, Terraform/OpenTofu, and Ansible with 
integrated security scanning and CI/CD.","https://about.gitlab.com/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"heroImage":5200,"body":5204,"authors":5205,"updatedDate":5208,"date":5209,"title":5198,"tags":5210,"description":5201,"category":718},"Infrastructure-as-code tools like TerraForm/OpenTofu and configuration\nmanagement tools like Ansible are often part of mission-critical workflows.\nSuch projects sometimes start as simple automations and are not necessarily\nsubject to the same software development best practices and regulatory\ncontrols as business software applications.\n\n\nAt the same time many of these automations are developed by system engineers or infrastructure engineers who may not have as much experience with DevOps, DevSecOps, CI/CD, and test automation practices. This becomes even more complicated when you work in a large enterprise organization with multiple engineers and siloed teams.\n\n\nAt GitLab we know DevSecOps and we have been using our unified DevSecOps platform for enterprise-scale, mission-critical automation workloads for more than 10 years. We have thousands of customers who use GitLab as a foundation for infrastructure as code (IaC), automation, cloud, and platform engineering practices.\n\n\nIn this article, we showcase some of the key features teams can leverage to turn their powerful automations into scalable and auditable software delivery pipelines.\n\n\n![Automation listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/oipm6tq8qutoh1ctredd.png)\n\n\n## Implementation\n\n\n[This project](https://gitlab.com/gl-demo-ultimate-saberkan/public/ansible-demo) demonstrates a comprehensive DevOps workflow that combines the power of OpenTofu with modern Ansible practices, all orchestrated through GitLab CI/CD pipelines. 
The solution showcases how to provision an AWS lab environment using OpenTofu components integrated with GitLab, and then deploy a Tomcat web server using modern Ansible, including custom execution environments and collections.\n\n\nThe project leverages numerous GitLab features:\n\n\n* Building and storing custom Ansible execution environments in the [GitLab Container Registry](https://docs.gitlab.com/user/packages/container_registry/)\n\n* [Security scanning for infrastructure as code and container vulnerabilities](https://docs.gitlab.com/user/application_security/iac_scanning/)\n\n* Integrating [Ansible linting with GitLab's Code Quality](https://docs.gitlab.com/user/application_security/iac_scanning/)\n\n* Storing Tomcat binaries in the [Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/)\n\n* Utilizing [CI/CD environment variables for configuration](https://docs.gitlab.com/ci/variables/)\n\n\nThe entire workflow is automated through a [GitLab pipeline](https://docs.gitlab.com/ci/pipelines/) that handles everything from infrastructure provisioning to application deployment and security testing.\n\n\n![ Workflow automated through a GitLab pipeline ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\n\n### Provisioning the environment with OpenTofu\n\n\nThe project begins with provisioning an AWS lab environment using OpenTofu. This is achieved through native integration with [GitLab's OpenTofu components](https://docs.gitlab.com/user/infrastructure/iac/), which streamline the infrastructure provisioning process. 
The pipeline includes validate, plan, and apply stages that ensure proper infrastructure deployment while maintaining GitLab's IaC best practices.\n\n\nThis project is leveraging [GitLab's Terraform State management](https://docs.gitlab.com/user/infrastructure/iac/terraform_state/) and [Terraform Module Registry](https://docs.gitlab.com/user/packages/terraform_module_registry/) capabilities. Both of these features are compatible with OpenTofu and HashiCorp Terraform. GitLab OpenTofu components can also be used with HashiCorp Terraform with [slight customization](https://gitlab.com/components/opentofu#can-i-use-this-component-with-terraform). You'll need to build your own image that includes a script named `gitlab-tofu` to keep it compatible with the component jobs then you can then modify `tofu` commands with `terraform` commands.\n\n\nThe OpenTofu module release component is a sample demonstrating how to build a Terraform module and store it in GitLab's Terraform module registry. The `provision_lab.tf` file imports this module directly from GitLab to deploy the lab environment in AWS. 
Upon completion, it outputs an inventory file containing the public IP address of the provisioned instance, which can be used in configuration management stages with Ansible.\n\n\n```\n\n# From .gitlab-ci.yml\n - component: gitlab.com/components/opentofu/module-release@1.1.0\n   inputs:\n     root_dir: tofu\n     as: 🔍 tofu-module-release\n     stage: 🏗️ build-tofu-module\n     module_version: 0.0.1\n     module_system: aws\n     module_name: aws-lab\n     root_dir: tofu/modules/ansible-demo/aws-lab\n     rules:\n       - if: \"$CI_COMMIT_BRANCH\"\n         when: manual\n```\n\n\n```\n\n# From provision_lab.tf\n\nmodule \"aws-lab\" {\n  source = \"https://gitlab.com/api/v4/projects/67604719/packages/terraform/modules/aws-lab/aws/0.0.1\"\n}\n\n```\n\n\nThe validate, plan, and deploy components are configured with `**auto_define_backend: true**`, which automatically integrates with GitLab's built-in Terraform state backend. This approach eliminates the need for manual backend configuration or external state storage solutions like S3 buckets.\n\n\n```\n\n# From gitlab-ci.yml\n\n- component: gitlab.com/components/opentofu/apply@0.55.0\n  inputs:\n    version: 0.55.0\n    opentofu_version: 1.8.8\n    root_dir: tofu\n    state_name: demo\n    as: ✅ tofu-apply\n    stage: 🏗️ provision-lab\n    auto_define_backend: true\n    rules:\n      - if: \"$CI_COMMIT_BRANCH\"\n        when: manual\n```\n\n\n![Validate, plan, and deploy components are configured with `auto_define_backend: true`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/giatmolwn9inusi4cev2.png)\n\n\nThe infrastructure configuration creates a CentOS Stream 9 EC2 instance with appropriate security groups for SSH access from GitLab runners and HTTP access to the Tomcat server.\n\n\nSSH access and HTTP configuration are configuration thought [GitLab CI/CD environment variables](https://docs.gitlab.com/ci/variables/#define-a-cicd-variable-in-the-ui).\n\n\n![SSH access and HTTP 
configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/cmqtzg6ahz8ua5w8ybgs.png)\n\n\nFor secure cloud access, the project implements [GitLab's OpenID Connect integration](https://docs.gitlab.com/ci/cloud_services/aws/) with AWS, using temporary credentials through AWS Security Token Service (STS):\n\n\n```\n\n# From .gitlab-ci.yml\n\n.tofu_aws_setup:\n id_tokens:\n   OIDC_TOKEN:\n     aud: https://gitlab.com\n before_script:\n   - echo \"${OIDC_TOKEN}\" > /tmp/web_identity_token\n   - export AWS_PROFILE=\"\"\n   - export AWS_ROLE_ARN=\"${AWS_ROLE_ARN}\"\n   - export AWS_WEB_IDENTITY_TOKEN_FILE=\"/tmp/web_identity_token\"\n```\n\n\n### Building the Ansible execution environment\n\n\nA key aspect of modern Ansible deployments is the use of [execution environments](https://docs.ansible.com/ansible/latest/getting_started_ee/index.html), containerized versions of Ansible with all necessary dependencies including roles and collections pre-installed. This project creates a custom execution environment based on Fedora 39, which includes ansible-core, ansible-runner, and additional collection such as ansible.posix required in this example for firewall and selinux configuration.\n\n\nThe third-party roles and collections in this project are natively downloaded from the community Ansible Galaxy repository. This approach leverages the community ecosystem of reusable Ansible content, as shown in the execution environment configuration. While this demo utilizes community Ansible resources, the exact same pipeline implementation is fully compatible with Red Hat Ansible Automation Platform. The pipeline structure remains identical, with only the content sources changing. Organizations using the enterprise version can simply redirect their automation content sources to their private Automation Hub instead of the default community Ansible Galaxy. 
According to the official enterprise documentation, this can be achieved by [configuring your private Automation Hub server and access token in the ansible.cfg](https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/1.2/html/getting_started_with_red_hat_ansible_automation_hub/proc-configure-automation-hub-server#proc-configure-automation-hub-server).\n\n\n```\n\n# From execution-environment.yml\n\n---\n\nversion: 3\n\n\nimages:\n  base_image:\n    name: quay.io/fedora/fedora:39\n\ndependencies:\n  ansible_core:\n    package_pip: ansible-core\n  ansible_runner:\n    package_pip: ansible-runner\n  system:\n    - openssh-clients\n    - sshpass\n  galaxy:\n    collections:\n    - name: ansible.posix\n      version: \">=2.0.0\"\n```\n\n\n![Execution environment pushed to GitLab's Container Registry ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433384/dh1o2ojjmb04ru4tfr9k.png)\n\n\nThe execution environment is defined in a YAML file and built using ansible-builder, then pushed to [GitLab's Container Registry](https://docs.gitlab.com/user/packages/container_registry/). 
This approach ensures consistent execution environments across different systems and simplifies dependency management.\n\n\n```\n\n# From gitlab-ci.yml\n\n🔨 ansible-build-ee:\n  stage: 📦 ansible-build-ee\n  image: docker:24.0.5\n  needs: []\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - apk add --no-cache python3 py3-pip\n    - pip install ansible-builder\n    - cd ansible/execution-environment\n  script:\n    - ansible-builder build -t ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} --container-runtime docker\n    - docker tag ${EE_IMAGE_NAME}:${EE_IMAGE_TAG} ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n```\n\n\n### Deploying Tomcat with Ansible\n\n\nOnce the infrastructure is provisioned and the execution environment is built, the pipeline deploys Tomcat using [Ansible Navigator](https://ansible.readthedocs.io/projects/navigator/). 
The execution environment built in previous stage is used as image for deployment job in GitLab pipeline.\n\n\n```\n\n# From gitlab-ci.yml\n\n🚀 ansible-deploy:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs:\n    - ✅ tofu-apply\n  extends: [.ssh_private_key_setup, .default_rules]\n  script:\n    - ansible-navigator run ansible/playbook.yml\n      -i ansible/inventory/hosts.ini\n      --execution-environment false\n      --mode stdout\n      --log-level debug\n```\n\n\nThe Tomcat deployment fetches the application package from [GitLab's Generic Package Repository](https://docs.gitlab.com/user/packages/generic_packages/), configures system users and permissions, and sets up Tomcat as a systemd service.\n\n\n```\n\n# From playbook.yml\n\n---\n\n- name: Deploy Tomcat Server\n  hosts: all\n  become: true\n  roles:\n      - role: tomcat\n\n  vars:\n    # Tomcat package and installation\n    tomcat_package: \"https://gitlab.com/api/v4/projects/67604719/packages/generic/apache-tomcat/10.1.39/apache-tomcat-10.1.39.tar.gz\"\n    tomcat_install_dir: \"/opt/tomcat\"\n    java_package: \"java-17-openjdk-devel\"\n```\n\n\n![GitLab Package Registry](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433381/mynak8i2k7ms9vhdijqg.png)\n\n\n### Security scanning and code quality\n\n\nSecurity is integrated throughout the pipeline with multiple scanning tools. The project uses [GitLab's built-in SAST IaC scanner](https://docs.gitlab.com/user/application_security/iac_scanning/) to detect vulnerabilities in both Terraform and Ansible code. 
[Container scanning](https://docs.gitlab.com/user/application_security/container_scanning/) is applied to the execution environment image to identify any security issues and generate a [software bill of materials (SBOM)](https://docs.gitlab.com/user/application_security/container_scanning/#cyclonedx-software-bill-of-materials).\n\n\n```\n\n# From gitlab-ci.yml\n\ninclude:\n\n- template: Jobs/SAST-IaC.gitlab-ci.yml\n\n- template: Jobs/Container-Scanning.gitlab-ci.yml\n\n```\n\n\n![Security is integrated throughout the pipeline with multiple scanning tools](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433386/e6ejckcv5kdyhhosej2f.png)\n\n\n\n\n![Dependency listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\n\nAdditionally, the project integrates Ansible Linter with [GitLab's Code Quality](https://docs.gitlab.com/ci/testing/code_quality/#import-code-quality-results-from-a-cicd-job). This integration produces reports that are displayed directly in the GitLab interface, making it easy to identify and address issues.\n\n\n```\n\n# From gitlab-ci.yml\n\n🔍 ansible-lint:\n  stage: 🚀 ansible-deploy\n  image: ${CI_REGISTRY_IMAGE}/${EE_IMAGE_NAME}:${EE_IMAGE_TAG}\n  needs: []\n  script:\n    - ansible-lint ansible/playbook.yml -f codeclimate | python3 -m json.tool | tee gl-code-quality-report.json || true\n  artifacts:\n    reports:\n      codequality:\n        - gl-code-quality-report.json\n```\n\n\n![The project integrates Ansible Linter with GitLab code quality](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433380/gsfpaldra4rmtkseaudo.png)\n\n\n### Health-checking the deployment\n\n\nAfter deployment, the pipeline performs health checks to ensure that the Tomcat server is running correctly. The health-check job attempts to connect to the server's HTTP port and verifies that it returns a successful response. 
This ensures that the deployment has completed successfully, and the application is accessible.\n\n\nYou can test access from your browser into the Tomcat-provisioned instance using the public IP address of the EC2 provisioned instance.\n\n\n![Checking the health of a job](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750433385/uksdkjryydxhu94v1naj.png)\n\n\n## Destroying the lab environment\n\n\nThe final stage of the pipeline is the cleanup process, which destroys the lab environment. This is implemented using the OpenTofu destroy component, which ensures that all resources created during the provisioning stage are properly removed.\n\n\n## Summary\n\n\nGitLab provides a unified DevSecOps platform and a framework to manage enterprise-scale, mission-critical infrastructure as code and configuration management automation practices. The framework includes version control, project planning and issue management, team collaboration, CI/CD pipelines, binary package and container registry, security scanning, and many other helpful features along with the ability to embed governance and controls in the processes. If you are looking to expand your private or public cloud practices or in general any governed, self-service automation workflow, consider GitLab, TerraForm, and Ansible as the three-legged stool and the foundation for a scalable and governed automation platform.\n\n\n> Get started with a [free trial of GitLab Ultimate](http://bout.gitlab.com/free-trial/). 
Sign up today!\n",[5206,5207],"George Kichukov","Salahddine Aberkan","2025-04-24","2019-07-01",[896,9],{"slug":5212,"featured":6,"template":700},"using-ansible-and-gitlab-as-infrastructure-for-code","content:en-us:blog:using-ansible-and-gitlab-as-infrastructure-for-code.yml","Using Ansible And Gitlab As Infrastructure For Code","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code.yml","en-us/blog/using-ansible-and-gitlab-as-infrastructure-for-code",{"_path":5218,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5219,"content":5224,"config":5228,"_id":5230,"_type":14,"title":5231,"_source":16,"_file":5232,"_stem":5233,"_extension":19},"/en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"title":5220,"description":5221,"ogTitle":5220,"ogDescription":5221,"noIndex":6,"ogImage":1179,"ogUrl":5222,"ogSiteName":685,"ogType":686,"canonicalUrls":5222,"schema":5223},"How to use Bazel with GitLab to speed up your builds","We explain why Bazel and GitLab CI are a great match to speed up your build times.","https://about.gitlab.com/blog/using-bazel-to-speed-up-gitlab-ci-builds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Bazel with GitLab to speed up your builds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":5220,"description":5221,"authors":5225,"heroImage":1179,"date":1696,"body":5226,"category":718,"tags":5227},[803],"[Bazel](https://bazel.build/) is a useful tool that can be used with GitLab\nCI to push your build pipelines into overdrive.\n\n\nFor maximum correctness, [CI/CD](/topics/ci-cd/) systems will usually\nrebuild all of the artifacts from scratch on every run. 
This method is\nconsidered safer since artifacts from one pipeline won't negatively impact\nsubsequent pipelines, and is a lesson learned from older CI tools where the\nagent state was persistent over time – so you never really knew if you could\ndo a build from scratch. The problem with redoing everything every time\nthough, is that it's slow. GitLab improves upon this by using caches and\nshared artifacts, but there's only so far that approach can take you.\n\n\nBazel is a good example of tackling things in a different way – it speeds up\nbuilds by only rebuilding what is necessary. On the surface, this might\nsound a lot like just having a cache and doing an incremental build. But the\nmain difference is that Bazel is really good at not only being fast, but\nalso\n[correct](https://docs.bazel.build/versions/3.4.0/guide.html#correct-incremental-rebuilds).\nBazel is much more reliable than traditional `Makefiles` or build scripts,\nwhich are notorious for occasionally forcing you to `make clean` because\nthey get into some inconsistent state they can't recover from.\n\n\nAs of now, Bazel supports building Java, C, C++, Python, and Objective-C,\nand can also produce packages for deployment on Android or iOS. More\ncapabilities are being added all the time, as well as open source rule sets\nfor other languages like Go, Scala and many more, so be sure to check their\nlatest [product\noverview](https://docs.bazel.build/versions/3.4.0/bazel-overview.html) for\nupdates.\n\n\n## Setting up Bazel builds in GitLab CI\n\n\nSetting up Bazel for builds is very straightforward. 
A job like the\nfollowing does everything you need:\n\n\n```yaml\n\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # Bazel 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output build //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\n\nWhat this script does is define a job called `build` which uses the official\nGoogle Bazel image. We track the digest version for two reasons: First, to\nensure immutability (tags can be updated), and second to use it as a cache\nkey so that the cache is invalidated whenever we upgrade the Bazel version.\nWe also override the entry point because we want to pass our own parameters\nto our `bazel` invocation. The second parameter is the\n[label](https://docs.bazel.build/versions/master/glossary.html#label) of the\n[target](https://docs.bazel.build/versions/master/glossary.html#target) we\nwant to build. A [target\npattern](https://docs.bazel.build/versions/master/glossary.html#target-pattern)\ncan also be used here to tell Bazel to build multiple things (and what they\ndepend on), rather than one thing (and what it depends on).\n\n\nThe first parameter (`--output_base output`) is to help Bazel work with a\nsecurity feature of the GitLab runner. By default, the runner will [not\naccess files outside of the build\ndir](https://docs.gitlab.com/ee/ci/yaml/#artifactspaths), but Bazel places\nits own cache outside by default. This parameter tells Bazel to place it\ninside, where the runner can access it. The next two sections (`artifacts`\nand `cache`), tell the runner where the output file you want to keep is, and\nimportantly for Bazel, where the cache is that you want to persist. 
Note\nthat until [this issue to allow for traversing\nsymlinks](https://gitlab.com/gitlab-org/gitlab/-/issues/19746) is resolved,\nyou must give the full path to the specific outputs you want to keep within\nthe `bazel-bin` folder.\n\n\nWhen this job runs, it places the current cache (if it exists, and only for\nthe current `BAZEL_DIGEST_VERSION`) in the `output` folder, and then runs\n`bazel` to build the `main:hello-world` target. It saves the artifact from\n`bazel-bin/main/hello-world`, and then caches everything in `output` for the\nnext run.\n\n\n### Bazel: notes on caching\n\n\nIn this example we've set up Bazel to work with GitLab caching, and this is\nhow we currently use it internally. If you already have Bazel remote cache\n(or even better, Bazel remote execution), there is no need to set up GitLab\nCI cache: It actually would likely make things slower since in that case\nthere is no need to download and unpack the cache at all. Setting up remote\ncaching or remote execution are more advanced and outside of the scope of\nthis article, but are even better ways to speed up the build. Until then,\nusing a GitLab cache can be a good interim step. If you're interested in\nlearning more about remote cache/remote execution, this [BazelCon\nvideo](https://www.youtube.com/watch?v=MyuJRUwT5LI&t=1017s) or Bazel's\nofficial [documentation on remote\ncaching](https://docs.bazel.build/versions/master/remote-caching.html) may\nbe helpful.\n\n\n## Building and testing with Bazel\n\n\nUsing Bazel to run your tests is just as easy, and there are nice benefits\nto doing so. If you can rely on accurately knowing what has changed, you can\nbe more selective in doing incremental tests and have the confidence that\ntests that were skipped were truly unnecessary. This is also quite easy to\nset up using Bazel, but one thing to consider is that running builds and\ntests all at once (rather than splitting build and test into different jobs)\nis going to be more efficient. 
You can do that by using a build job that\nlooks like this:\n\n\n```yaml\n\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output test //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\n\nIn a build that includes all tests, you typically want to run everything\nthat changed. That's usually done using an invocation like `bazel test\n//main/...` which:\n\n\n1. Finds all targets (referred to as `...`) in the workspace location (`//`\ndenotes the root of the\n[workspace](https://docs.bazel.build/versions/master/glossary.html#workspace)),\nso we are referring to `main` relative to the root.) Note that you probably\ndon't want to include a bare `//` (without `main`), since that will include\nthe custom `output` folder and that is probably not what you intended.\n\n1. Builds usual targets.\n\n1. Builds test targets.\n\n1. Runs test targets.\n\n\nOnly using the `test` parameter works because `bazel test` not only runs\ntests, but also builds everything that matched the target pattern by\ndefault. Individual targets can be excluded from being matched by `...` by\napplying a `manual` tag to them ([see `tags` in the Bazel glossary\ntable](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes)).\nOne callout - in the example project we're building ([details\nbelow](#examples)), there actually aren't any tests, so this fails because\nwe requested a test pass and there weren't any. 
If your project has tests in\nit, it will work fine.\n\n\n## Examples using Bazel\n\n\nWe're actually using Bazel here at GitLab to build our [GitLab Agent for\nKubernetes](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent).\nIf you're interested in seeing a more complex, complete implementation using\nBazel then that's a great one to explore. The simple example from this blog\ncan be found live in [my own personal\nproject](https://gitlab.com/jyavorska/testbazel), and it is based on the\n[stage three build\ntutorial](https://docs.bazel.build/versions/3.4.0/tutorial/cpp.html) from\nBazel's own documentation.\n\n\nBazel itself is also highly configurable through its own `.bazelrc`, `BUILD`\nfiles, and more. The [user documentation for\nBazel](https://docs.bazel.build/versions/master/guide.html) contains several\nexamples along with an exhaustive configuration reference.\n\n\n## What's next with Bazel?\n\n\nWe are considering using Bazel in few more areas within GitLab:\n\n\n- In an ideal world, after a minor change, the build and test should only\ntake a few seconds to complete. When the jobs are fast enough, it could even\nbe triggered via an editor on every change before being committed to git at\nall. This kind of capability could be integrated with the Web IDE, giving\nyou immediate insight into the results of your change. We have an issue\nrelated to [making it easier to run pipelines from the Web\nIDE](https://gitlab.com/gitlab-org/gitlab/-/issues/213604) that could take\nadvantage of this.\n\n- By default, GitLab uses [a gem we\ncreated](https://gitlab.com/gitlab-org/ci-cd/test_file_finder/) (which is\navailable in this\n[template](https://docs.gitlab.com/ee/ci/testing/fail_fast_testing.html) for\ntest execution optimization, but all we're doing so far is running the\nriskiest tests first. 
As Bazel grows and adds support for more languages, it\ncould potentially become a standard for this purpose, allowing you to run\neven fewer tests (and among those, the riskiest ones first). We have an\n[epic](https://gitlab.com/groups/gitlab-org/-/epics/4121) where you can\ntrack progress toward this idea.\n\n- Finally, Bazel also supports distributed builds and caching, opening the\ndoor to autoscaling compilation and test capacity alongside runner capacity,\nor even sharing the same capacity for whatever jobs are needed at a given\nmoment. This function would require managing your own capacity for this\npurpose, but in the future we could imagine this being added to GitLab. We\nhave an [issue for exploring different ways Bazel could support distributed\njobs](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26663) using the\nGitLab Runner.\n\n\n## Tell us your Bazel success stories\n\n\nAre you using Bazel with GitLab CI? We'd love your feedback on what features\nwe could add to make things work better and hear about the performance gains\nyou've found from the combo. 
Please let us know in the Meta issue below, or\ncontact [Jason Yavorska](https://twitter.com/j4yav) on Twitter.\n\n\n## Related content\n\n\n- [Bazel website](https://bazel.build/)\n\n- [Meta issue for deeper integration in\nGitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/201484)\n\n- [Bazel blog on integrating it with CI\nsystems](https://blog.bazel.build/2016/01/27/continuous-integration.html)\n\n- [GitLab CI quick start](https://docs.gitlab.com/ee/ci/quick_start/)\n\n\nCover image by [Lucas van Oort](https://unsplash.com/@switch_dtp_fotografie)\non [Unsplash](https://unsplash.com)\n\n{: .note}\n",[9,232,917],{"slug":5229,"featured":6,"template":700},"using-bazel-to-speed-up-gitlab-ci-builds","content:en-us:blog:using-bazel-to-speed-up-gitlab-ci-builds.yml","Using Bazel To Speed Up Gitlab Ci Builds","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds.yml","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"_path":5235,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5236,"content":5242,"config":5248,"_id":5250,"_type":14,"title":5251,"_source":16,"_file":5252,"_stem":5253,"_extension":19},"/en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"title":5237,"description":5238,"ogTitle":5237,"ogDescription":5238,"noIndex":6,"ogImage":5239,"ogUrl":5240,"ogSiteName":685,"ogType":686,"canonicalUrls":5240,"schema":5241},"Using child pipelines to continuously deploy to five environments","Learn how to manage continuous deployment to multiple environments, including temporary, on-the-fly sandboxes, with a minimalist GitLab workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097012/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750097011626.jpg","https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Using child pipelines to continuously deploy to five environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olivier Dupré\"}],\n        \"datePublished\": \"2024-09-26\",\n      }",{"title":5237,"description":5238,"authors":5243,"heroImage":5239,"date":5245,"body":5246,"category":718,"tags":5247},[5244],"Olivier Dupré","2024-09-26","DevSecOps teams sometimes require the ability to manage continuous\ndeployment across multiple environments — and they need to do so without\nchanging their workflows. The [GitLab DevSecOps\nplatform](https://about.gitlab.com/) supports this need, including\ntemporary, on-the-fly sandboxes, with a minimalist approach. In this\narticle, you'll learn how to run continuous deployment of infrastructure\nusing Terraform, over multiple environments.\n\n\nThis strategy can easily be applied to any project, whether it is\ninfrastructure as code (IaC) relying on another technology, such as\n[Pulumi](https://www.pulumi.com/) or [Ansible](https://www.ansible.com/),\nsource code in any language, or a monorepo that mixes many languages.\n\n\nThe final pipeline that you will have at the end of this tutorial will\ndeploy:\n\n\n* A temporary **review** environment for each feature branch.\n\n* An **integration** environment, easy to wipe out and deployed from the\nmain branch.\n\n* A **QA** environment, also deployed from the main branch, to run quality\nassurance steps.\n\n* A **staging** environment, deployed for every tag. This is the last round\nbefore production.\n\n* A **production** environment, just after the staging environment. 
This one\nis triggered manually for demonstration, but can also be continuously\ndeployed.\n\n\n>Here is the legend for the flow charts in this article:\n\n> * Round boxes are the GitLab branches.\n\n> * Square boxes are the environments.\n\n> * Text on the arrows are the actions to flow from one box to the next.\n\n> * Angled squares are decision steps.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\nOn each step, you'll learn the [why](#why) and the [what](#what) before\nmoving to the [how](#how). This will help you fully understand and replicate\nthis tutorial.\n\n\n## Why\n\n\n* [Continuous\nintegration](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\nis almost a de facto standard. Most companies have implemented CI pipelines\nor are willing to standardize their practice.\n\n\n* [Continuous\ndelivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd),\nwhich pushes artifacts to a repository or registry at the end of the CI\npipeline, is also popular.\n\n\n* Continuous deployment, which goes further and deploys these artifacts\nautomatically, is less widespread. When it has been implemented, we see it\nessentially in the application field. When discussing continuously\ndeploying  infrastructure, the picture seems less obvious, and is more about\nmanaging several environments. In contrast, testing, securing, and verifying\nthe infrastructure's code seems more challenging. And this is one of the\nfields where DevOps has not yet reached its maturity. 
One of the other\nfields is to shift security left, integrating security teams and, more\nimportantly, security concerns, earlier in the delivery lifecycle, to\nupgrade from DevOps to ***DevSecOps***.\n\n\nGiven this high-level picture, in this tutorial, you will work toward a\nsimple, yet efficient way to implement DevSecOps for your infrastructure\nthrough the example of deploying resources to five environments, gradually\nprogressing from development to production.\n\n\n__Note:__ Even if I advocate embracing a FinOps approach and reducing the\nnumber of environments, sometimes there are excellent reasons to maintain\nmore than just dev, staging, and production. So, please, adapt the examples\nbelow to match your needs.\n\n\n## What\n\n\nThe rise of cloud technology has driven the usage of IaC. Ansible and\nTerraform were among the first to pave the road here. OpenTofu, Pulumi, AWS\nCDK, Google Deploy Manager, and many others joined the party.\n\n\nDefining IaC is a perfect solution to feel safe when deploying\ninfrastructure. You can test it, deploy it, and replay it again and again\nuntil you reach your goal.\n\n\nUnfortunately, we often see companies maintain several branches, or even\nrepositories, for each of their target environments. And this is where the\nproblems start. They are no longer enforcing a process. They are no longer\nensuring that any change in the production code base has been accurately\ntested in previous environments. And they start seeing drifts from one\nenvironment to the other.\n\n\nI realized this tutorial was necessary when, at a conference I attended,\nevery participant said they do not have a workflow that enforces the\ninfrastructure to be tested thoroughly before being deployed to production.\nAnd they all agreed that sometimes they patch the code directly in\nproduction. Sure, this is fast, but is it safe? How do you report back to\nprevious environments? How do you ensure there are no side effects? 
How do\nyou control whether you are putting your company at risk with new\nvulnerabilities being pushed too quickly in production?\n\n\nThe question of *why* DevOps teams deploy directly to production is critical\nhere. Is it because the pipeline could be more efficient or faster? Is there\nno automation? Or, even worse, because there is *no way to test accurately\noutside of production*?\n\n\nIn the next section, you will learn how to implement automation for your\ninfrastructure and ensure that your DevOps team can effectively test what\nyou are doing before pushing to any environment impacting others. You will\nsee how your code is secured and its deployment is controlled, end-to-end.\n\n\n## How\n\n\nAs mentioned earlier, there are many IaC languages out there nowadays and we\nobjectively cannot cover *all* of them in a single article. So, I will rely\non a basic Terraform code running on Version 1.4. Please do not focus on the\nIaC language itself but instead on the process that you could apply to your\nown ecosystem.\n\n\n### The Terraform code\n\n\nLet's start with a fundamental Terraform code.\n\n\nWe are going to deploy to AWS, a virtual private cloud (VPC), which is a\nvirtual network. In that VPC, we will deploy a public and a private subnet.\nAs their name implies, they are subnets of the main VPC. Finally, we will\nadd an Elastic Cloud Compute (EC2) instance (a virtual machine) in the\npublic subnet.\n\n\nThis demonstrates the deployment of four resources without adding too much\ncomplexity. 
The idea is to focus on the pipeline, not the code.\n\n\nHere is the target we want to reach for your repository.\n\n\n![target for\nrepository](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097033415.png)\n\n\nLet’s do it step by step.\n\n\nFirst, we declare all resources in a `terraform/main.tf` file:\n\n\n```terraform\n\nprovider \"aws\" {\n  region = var.aws_default_region\n}\n\n\nresource \"aws_vpc\" \"main\" {\n  cidr_block = var.aws_vpc_cidr\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n\n\nresource \"aws_subnet\" \"public_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_public_subnet_cidr\n\n  tags = {\n    Name = \"Public Subnet\"\n  }\n}\n\nresource \"aws_subnet\" \"private_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_private_subnet_cidr\n\n  tags = {\n    Name = \"Private Subnet\"\n  }\n}\n\n\nresource \"aws_instance\" \"sandbox\" {\n  ami           = var.aws_ami_id\n  instance_type = var.aws_instance_type\n\n  subnet_id = aws_subnet.public_subnet.id\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n\n```\n\n\nAs you can see, there are a couple of variables that are needed for this\ncode, so let's declare them in a `terraform/variables.tf` file:\n\n\n```terraform\n\nvariable \"aws_ami_id\" {\n  description = \"The AMI ID of the image being deployed.\"\n  type        = string\n}\n\n\nvariable \"aws_instance_type\" {\n  description = \"The instance type of the VM being deployed.\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\n\nvariable \"aws_vpc_cidr\" {\n  description = \"The CIDR of the VPC.\"\n  type        = string\n  default     = \"10.0.0.0/16\"\n}\n\n\nvariable \"aws_public_subnet_cidr\" {\n  description = \"The CIDR of the public subnet.\"\n  type        = string\n  default     = \"10.0.1.0/24\"\n}\n\n\nvariable \"aws_private_subnet_cidr\" {\n  description = \"The CIDR of the 
private subnet.\"\n  type        = string\n  default     = \"10.0.2.0/24\"\n}\n\n\nvariable \"aws_default_region\" {\n  description = \"Default region where resources are deployed.\"\n  type        = string\n  default     = \"eu-west-3\"\n}\n\n\nvariable \"aws_resources_name\" {\n  description = \"Default name for the resources.\"\n  type        = string\n  default     = \"demo\"\n}\n\n```\n\n\nAlready, we are almost good to go on the IaC side. What's missing is a way\nto share the Terraform states. For those who don't know, Terraform works\nschematically doing the following:\n\n\n* `plan` checks the differences between the current state of the\ninfrastructure and what is defined in the code. Then, it outputs the\ndifferences.\n\n* `apply` applies the differences in the `plan` and updates the state.\n\n\nFirst round, the state is empty, then it is filled with the details (ID,\netc.) of the resources applied by Terraform.\n\n\nThe problem is: Where is that state stored? How do we share it so several\ndevelopers can collaborate on code?\n\n\nThe solution is fairly simple: Leverage GitLab to store and share the state\nfor you through a [Terraform HTTP\nbackend](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).\n\n\nThe first step in using this backend is to create the most simple\n`terraform/backend.tf` file. The second step will be handled in the\npipeline.\n\n\n```terraform\n\nterraform {\n  backend \"http\" {\n  }\n}\n\n```\n\n\nEt voilà! We have a bare minimum Terraform code to deploy these four\nresources. 
We will provide the variable values at the runtime, so let's do\nthat later.\n\n\n### The workflow\n\n\nThe workflow that we are going to implement now is the following:\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\n1. Create a **feature** branch. This will continuously run all scanners on\nthe code to ensure that it is still compliant and secured. This code will be\ncontinuously deployed to a temporary environment `review/feature_branch`\nwith the name of the current branch. This is a safe environment where the\ndevelopers and operations teams can test their code without impacting\nanybody. This is also where we will enforce the process, like enforcing code\nreviews and running scanners, to ensure that the quality and security of the\ncode are acceptable and do not put your assets at risk. The infrastructure\ndeployed by this branch is automatically destroyed when the branch is\nclosed. This helps you keep your budget under control.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\u003C/pre>\n\n\n2. Once approved, the feature branch will be **merged** into the main\nbranch. This is a [protected\nbranch](https://docs.gitlab.com/ee/user/project/protected_branches.html)\nwhere no one can push. This is mandatory to ensure that every change request\nto production is thoroughly tested. That branch is also continuously\ndeployed. The target here is the `integration` environment. 
To keep this\nenvironment slightly more stable, its deletion is not automated but can be\ntriggered manually.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main) -->|auto deploy| E[integration]\n\u003C/pre>\n\n\n3. From there, manual approval is required to trigger the next deployment.\nThis will deploy the main branch to the `qa` environment. Here, I have set a\nrule to prevent deletion from the pipeline. The idea is that this\nenvironment should be quite stable (after all, it's already the third\nenvironment), and I would like to prevent deletion by mistake. Feel free to\nadapt the rules to match your processes.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main)-->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\u003C/pre>\n\n\n4. To proceed, we will need to **tag** the code. We are relying on\n[protected\ntags](https://docs.gitlab.com/ee/user/project/protected_tags.html) here to\nensure that only a specific set of users are allowed to deploy to these last\ntwo environments. This will immediately trigger a deployment to the\n`staging` environment.\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    D(main) -->|tag| G(X.Y.Z)\n    F[qa] -->|validate| G\n\n    G -->|auto deploy| H[staging]\n\u003C/pre>\n\n\n5. Finally, we are landing to `production`. When discussing infrastructure,\nit is often challenging to deploy progressively (10%, 25%, etc.), so we will\ndeploy the whole infrastructure. Still, we control that deployment with a\nmanual trigger of this last step. 
And to enforce maximum control on this\nhighly critical environment, we will control it as a [protected\nenvironment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n\n\u003Cpre class=\"mermaid\">\n\nflowchart LR\n    H[staging] -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n\n### The pipeline\n\n\nTo implement the above [workflow](#the-workflow), we are now going to\nimplement a pipeline with two [downstream\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n\n\n#### The main pipeline\n\n\nLet's start with the main pipeline. This is the one that will be triggered\nautomatically on any **push to a feature branch**, any **merge to the\ndefault branch**, or any **tag**. *The one* that will do true **continuous\ndeployment** to the following environments: `dev`, `integration`, and\n`staging`. And it is declared in the `.gitlab-ci.yml` file at the root of\nyour project.\n\n\n![the repository\ntarget](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097033417.png)\n\n\n```yml\n\nStages:\n  - test\n  - environments\n\n.environment:\n  stage: environments\n  variables:\n    TF_ROOT: terraform\n    TF_CLI_ARGS_plan: \"-var-file=../vars/$variables_file.tfvars\"\n  trigger:\n    include: .gitlab-ci/.first-layer.gitlab-ci.yml\n    strategy: depend            # Wait for the triggered pipeline to successfully complete\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nreview:\n  extends: .environment\n  variables:\n    environment: review/$CI_COMMIT_REF_SLUG\n    TF_STATE_NAME: $CI_COMMIT_REF_SLUG\n    variables_file: review\n    TF_VAR_aws_resources_name: $CI_COMMIT_REF_SLUG  # Used in the tag Name of the resources deployed, to easily differenciate them\n  rules:\n    - 
if: $CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n\nintegration:\n  extends: .environment\n  variables:\n    environment: integration\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nstaging:\n  extends: .environment\n  variables:\n    environment: staging\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_TAG\n\n#### TWEAK\n\n# This tweak is needed to display vulnerability results in the merge\nwidgets.\n\n# As soon as this issue https://gitlab.com/gitlab-org/gitlab/-/issues/439700\nis resolved, the `include` instruction below can be removed.\n\n# Until then, the SAST IaC scanners will run in the downstream pipelines,\nbut their results will not be available directly in the merge request\nwidget, making it harder to track them.\n\n# Note: This workaround is perfectly safe and will not slow down your\npipeline.\n\ninclude:\n  - template: Security/SAST-IaC.gitlab-ci.yml\n#### END TWEAK\n\n\n```\n\n\nThis pipeline runs only two stages: `test` and  `environments`. The former\nis needed for the *TWEAK* to run scanners. The later triggers a child\npipeline with a different set of variables for each case defined above (push\nto the branch, merge to the default branch, or tag).\n\n\nWe are adding here a dependency with the keyword\n[strategy:depend](https://docs.gitlab.com/ee/ci/yaml/index.html#triggerstrategy)\non our child pipeline so the pipeline view in GitLab will be updated only\nonce the deployment is finished.\n\n\nAs you can see here, we are defining a base job,\n[hidden](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs), and we are\nextending it with specific variables and rules to trigger only one\ndeployment for each target environment.\n\n\nBesides the [predefined\nvariables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html),\nwe are using two new entries that we need to define:\n\n1. 
[The variables specific](#the-variable-definitions) to each environment:\n`../vars/$variables_file.tfvars`\n\n2. [The child pipeline](#the-child-pipeline), defined in\n`.gitlab-ci/.first-layer.gitlab-ci.yml`\n\n\nLet's start with the smallest part, the variable definitions.\n\n\n### The variable definitions\n\n\nWe are going here to mix two solutions to provide variables to Terraform:\n\n\n* The first one using [.tfvars\nfiles](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files)\nfor all non-sensitive input, which should be stored within GitLab.\n\n\n![solution one to provide variables to\nTerraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097033419.png)\n\n\n* The second using [environment\nvariables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables)\nwith the prefix `TF_VAR`. That second way to inject variables, associated\nwith the GitLab capacity to [mask\nvariables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable),\n[protect\nthem](https://docs.gitlab.com/ee/ci/variables/#protect-a-cicd-variable), and\n[scope them to\nenvironments](https://docs.gitlab.com/ee/ci/environments/index.html#limit-the-environment-scope-of-a-cicd-variable)\nis a powerful solution to **prevent sensitive information leakages**. 
(If\nyou consider your production’s private CIDR very sensitive, you could\nprotect it like this, ensuring it is only available for the `production`\nenvironment, for pipelines running against protected branches and tags, and\nthat its value is masked in the job’s logs.)\n\n\n![solution two to provide variables to\nTerraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097033422.png)\n\n\nAdditionally, each variable file should be controlled through a\n[`CODEOWNERS` file](https://docs.gitlab.com/ee/user/project/codeowners/) to\nset who can modify each of them.\n\n\n```\n\n[Production owners] \n\nvars/production.tfvars @operations-group\n\n\n[Staging owners]\n\nvars/staging.tfvars @odupre @operations-group\n\n\n[CodeOwners owners]\n\nCODEOWNERS @odupre\n\n```\n\n\nThis article is not a Terraform training, so we will go very fast and simply\nshow here the `vars/review.tfvars` file. Subsequent environment files are,\nof course, very similar. Just set the non-sensitive variables and their\nvalues here.\n\n\n```shell\n\naws_vpc_cidr = \"10.1.0.0/16\"\n\naws_public_subnet_cidr = \"10.1.1.0/24\"\n\naws_private_subnet_cidr = \"10.1.2.0/24\"\n\n```\n\n\n#### The child pipeline\n\n\nThis one is where the actual work is done. So, it is slightly more complex\nthan the first one. But there is no difficulty here that we cannot overcome\ntogether!\n\n\nAs we have seen in the definition of the [main\npipeline](#the-main-pipeline), that downstream pipeline is declared in the\nfile `.gitlab-ci/.first-layer.gitlab-ci.yml`.\n\n\n![Downstream pipeline declared in\nfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097033424.png)\n\n\nLet's break it down into small chunks. 
We'll see the big picture at the end.\n\n\n##### Run Terraform commands and secure the code\n\n\nFirst, we want to run a pipeline for Terraform. We, at GitLab, are open\nsource. So, our Terraform template is open source. And you simply need to\ninclude it. This can be achieved with the following snippet:\n\n\n```yml\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n```\n\n\nThis template runs for you the Terraform checks on the formatting and\nvalidates your code, before planning and applying it. It also allows you to\ndestroy what you have deployed.\n\n\nAnd, because GitLab is the a single, unified DevSecOps platform, we are also\nautomatically including two security scanners within that template to find\npotential threats in your code and warn you before you deploy it to the next\nenvironments.\n\n\nNow that we have checked, secured, built, and deployed our code, let's do\nsome tricks.\n\n\n##### Share cache between jobs\n\n\nWe will cache the job results to reuse them in subsequent pipeline jobs.\nThis is as simple as adding the following piece of code:\n\n\n```yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n```\n\n\nHere, we are defining a different cache for each commit, falling back to the\nmain branch name if needed.\n\n\nIf we look carefully at the templates that we are using, we can see that it\nhas some rules to control when jobs are run. We want to run all controls\n(both QA and security) on all branches. So, we are going to override these\nsettings.\n\n\n##### Run controls on all branches\n\n\nGitLab templates are a powerful feature where one can override only a piece\nof the template. Here, we are interested only in overwriting the rules of\nsome jobs to always run quality and security checks. 
Everything else defined\nfor these jobs will stay as defined in the template.\n\n\n```yml\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - when: always\n\niac-sast:\n  rules:\n    - when: always\n```\n\n\nNow that we have enforced the quality and security controls, we want to\ndifferentiate how the main environments (integration and staging) in the\n[workflow](#the-workflow) and review environments behave. Let's start by\ndefining the main environment’s behavior, and we will tweak this\nconfiguration for the review environments.\n\n\n##### CD to integration and staging\n\n\nAs defined earlier, we want to deploy the main branch and the tags to these\ntwo environments. We are adding rules to control that on both the `build`\nand `deploy` jobs. Then, we want to enable `destroy` only for the\n`integration` as we have defined `staging` to be too critical to be deleted\nwith a single click. This is error-prone and we don't want to do that.\n\n\nFinally, we are linking the `deploy` job to the `destroy` one, so we can\n`stop` the environment directly from GitLab GUI.\n\n\nThe `GIT_STRATEGY` is here to prevent retrieving the code from the source\nbranch in the runner when destroying. This would fail if the branch has been\ndeleted manually, so we are relying on the cache to get everything we need\nto run the Terraform instructions.\n\n\n```yml\n\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env\n(integration or staging) when merging to default branch or tagging. 
Second\nlayer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n```\n\n\nAs said, this matches the need to deploy to `integration` and `staging`. But\nwe are still missing a temporary environment where the developers can\nexperience and validate their code without impacts on others. This is where\nthe deployment to the `review` environment takes place.\n\n\n##### CD to review environments\n\n\nDeploying to review environment is not too different than deploying to\n`integration` and `staging`. So we will once again leverage GitLab's\ncapacity to overwrite only pieces of job definition here.\n\n\nFirst, we set rules to run these jobs only on feature branches.\n\n\nThen, we link the `deploy_review` job to `destroy_review`. This will allow\nus to stop the environment **manually** from the GitLab user interface, but\nmore importantly, it will **automatically trigger the environment\ndestruction** when the feature branch is closed. 
This is a good FinOps\npractice to help you control your operational expenditures.\n\n\nSince Terraform needs a plan file to destroy an infrastructure, exactly like\nit needs one to build an infrastructure, then we are adding a dependency\nfrom `destroy_review` to `build_review`, to retrieve its artifacts.\n\n\nFinally, we see here that the environment's name is set to `$environment`.\nIt has been set in the [main pipeline](#the-main-pipeline) to\n`review/$CI_COMMIT_REF_SLUG`, and forwarded to this child pipeline with the\ninstruction `trigger:forward:yaml_variables:true`.\n\n\n```yml\n\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n```\n\n\nSo, to recap, we now have a pipeline that can:\n\n\n* Deploy temporary review environments, which are automatically cleaned up\nwhen the feature branch is closed\n\n* Continuously deploy the **default branch** to `integration`\n\n* Continuously deploy the **tags** to `staging`\n\n\nLet's now add an extra layer, where we will deploy, based on a manual\ntrigger this time, to `qa` and `production` environments.\n\n\n##### Continously deploy to QA and production\n\n\nBecause not everybody is willing to deploy continuously to production, we\nwill add 
a manual validation to the next two deployments. From a purely\n**CD** perspective, we would not add this trigger, but take this as an\nopportunity to learn how to run jobs from other triggers.\n\n\nSo far, we have started a [child pipeline](#the-child-pipeline) from the\n[main pipeline](#the-main-pipeline) to run all deployments.\n\n\nSince we want to run other deployments from the default branch and the tags,\nwe will add another layer dedicated to these additional steps. Nothing new\nhere. We will just repeat exactly the same process as the one we only did\nfor the [main pipeline](#the-main-pipeline). Going this way allows you to\nmanipulate as many layers as you need. I have already seen up to nine\nenvironments in some places.\n\n\nWithout arguing once again on the benefits to have fewer environments, the\nprocess that we are using here makes it very easy to implement the same\npipeline all the way from early stages to final delivery, while keeping your\npipeline definition simple and split in small chunks that you can maintain\nat no cost.\n\n\nTo prevent variable conflicts here, we are just using new var names to\nidentify the Terraform state and input file.\n\n\n```yml\n\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n\n**One important trick here is the strategy used for the new downstream\npipeline.** We leave that `trigger:strategy` to its default value;\notherwise, the [main pipeline](#the-main-pipeline) would wait for your\n[grand-child pipeline](#the-grand-child-pipeline) to finish. With a manual\ntrigger, this could last for a very long time and make your pipeline\ndashboard harder to read and understand.\n\n\nYou have probably already wondered what is the content of that\n`.gitlab-ci/.second-layer.gitlab-ci.yml` file we are including here.  
We\nwill cover that in the next section.\n\n\n##### The first layer complete pipeline definition\n\n\nIf you are looking for a complete view of this first layer (stored in\n`.gitlab-ci/.first-layer.gitlab-ci.yml`), just expand the section below.\n\n\n```yml\n\nvariables:\n  TF_VAR_aws_ami_id: $AWS_AMI_ID\n  TF_VAR_aws_instance_type: $AWS_INSTANCE_TYPE\n  TF_VAR_aws_default_region: $AWS_DEFAULT_REGION\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n  - cleanup\n  - 2nd_layer       # Use to deploy a 2nd environment on both the main branch and on the tags\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\niac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\n###########################################################################################################\n\n## Integration env. and Staging. env\n\n##  * Auto-deploy to Integration on merge to main.\n\n##  * Auto-deploy to Staging on tag.\n\n##  * Integration can be manually destroyed if TF_DESTROY is set to true.\n\n##  * Destroy of next env. 
is not automated to prevent errors.\n\n###########################################################################################################\n\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env\n(integration or staging) when merging to default branch or tagging. Second\nlayer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n###########################################################################################################\n\n\n###########################################################################################################\n\n## Dev env.\n\n##  * Temporary environment. 
Lives and dies with the Merge Request.\n\n##  * Auto-deploy on push to feature branch.\n\n##  * Auto-destroy on when Merge Request is closed.\n\n###########################################################################################################\n\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n###########################################################################################################\n\n\n###########################################################################################################\n\n## Second layer\n\n##  * Deploys from main branch to qa env.\n\n##  * Deploys from tag to production.\n\n###########################################################################################################\n\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n###########################################################################################################\n\n```\n\n\nAt this stage, we are already deploying safely to three environments. That\nis my personal ideal recommendation. However, if you need more environments,\nadd them to your CD pipeline.\n\n\nYou have certainly already noted that we include a downstream pipeline with\nthe keyword `trigger:include`. This includes the file\n`.gitlab-ci/.second-layer.gitlab-ci.yml`. We want to run almost the same\npipeline so obviously, its content is very similar to the one we have\ndetailed above. The main advantage here to define this [grand-child\npipeline](#the-grand-child-pipeline) is that it lives on its own, making\nboth variables and rules way easier to define.\n\n\n### The grand-child pipeline\n\n\nThis second layer pipeline is a brand new pipeline. Hence, it needs to mimic\nthe first layer definition with:\n\n\n* [Inclusion of the Terraform\ntemplate](#run-terraform-commands-and-secure-the-code).\n\n* [Enforcement of security checks](#run-controls-on-all-branches). 
Terraform\nvalidation would be duplicates of the first layer, but security scanners may\nfind threats that did not yet exist when scanners previously ran (for\nexample, if you deploy to production a couple of days after your deployment\nto staging).\n\n* [Overwrite build and deploy jobs to set specific\nrules](#cd-to-review-environments). Note that the `destroy` stage is no\nlonger automated to prevent too fast deletions.\n\n\nAs explained above, the `TF_STATE_NAME` and `TF_CLI_ARGS_plan` have been\nprovided from the [main pipeline](#the-main-pipeline) to the [child\npipeline](#the-child-pipeline). We needed another variable name to pass\nthese values from the [child pipeline](#the-child-pipeline) to here, the\n[grand-child pipeline](#the-grand-child-pipeline). This is why they are\npostfixed with `_2` in the child pipeline and the value is copied back to\nthe appropriate variable during the `before_script` here.\n\n\nSince we have already broken down each step above, we can zoom out here\ndirectly to the broad view of the global second layer definition (stored in\n`.gitlab-ci/.second-layer.gitlab-ci.yml`).\n\n\n```yml\n\n# Use to deploy a second environment on both the default branch and the\ntags.\n\n\ninclude:\n  template: Terraform.gitlab-ci.yml\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n\nfmt:\n  rules:\n    - when: never\n\nvalidate:\n  rules:\n    - when: never\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: always\n\n###########################################################################################################\n\n## QA env. and Prod. env\n\n##  * Manually trigger build and auto-deploy in QA\n\n##  * Manually trigger both build and deploy in Production\n\n##  * Destroy of these env. 
is not automated to prevent errors.\n\n###########################################################################################################\n\nbuild:  # terraform plan\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment:\n    name: $TF_STATE_NAME_2\n    action: prepare\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - when: manual\n\ndeploy: # terraform apply\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment: \n    name: $TF_STATE_NAME_2\n    action: start\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG && $TF_AUTO_DEPLOY == \"true\"\n    - if: $CI_COMMIT_TAG\n      when: manual\n###########################################################################################################\n\n```\n\n\nEt voilà. 
**We are ready to go.** Feel free to change the way you control\nyour job executions, leveraging for example GitLab's capacity to [delay a\njob](https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-job-after-a-delay)\nbefore deploying to production.\n\n\n## Try it yourself\n\n\nWe finally reached our destination. We are now able to control **deployments\nto five different environments**, with only the **feature branches**, the\n**main branch**, and **tags**.\n\n* We are intensively reusing GitLab open source templates to ensure\nefficiency and security in our pipelines.\n\n* We are leveraging GitLab template capacities to overwrite only the blocks\nthat need custom control.\n\n* We have split the pipeline in small chunks, controlling the downstream\npipelines to match exactly what we need.\n\n\nFrom there, the floor is yours. You could, for example, easily update the\nmain pipeline to trigger downstream pipelines for your software source code,\nwith the\n[trigger:rules:changes](https://docs.gitlab.com/ee/ci/yaml/#ruleschanges)\nkeyword. And use another\n[template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/)\ndepending on the changes that happened. 
But that is another story.\n",[9,785,786,495,917],{"slug":5249,"featured":6,"template":700},"using-child-pipelines-to-continuously-deploy-to-five-environments","content:en-us:blog:using-child-pipelines-to-continuously-deploy-to-five-environments.yml","Using Child Pipelines To Continuously Deploy To Five Environments","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments.yml","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"_path":5255,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5256,"content":5262,"config":5267,"_id":5269,"_type":14,"title":5270,"_source":16,"_file":5271,"_stem":5272,"_extension":19},"/en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"title":5257,"description":5258,"ogTitle":5257,"ogDescription":5258,"noIndex":6,"ogImage":5259,"ogUrl":5260,"ogSiteName":685,"ogType":686,"canonicalUrls":5260,"schema":5261},"How to make small changes using GitLab’s Web IDE","A quick three minute demo shows how teams can deliver better apps faster using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/using-gitlab-web-ide-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make small changes using GitLab’s Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-05-28\",\n      }",{"title":5257,"description":5258,"authors":5263,"heroImage":5259,"date":5264,"body":5265,"category":1040,"tags":5266},[715],"2020-05-28","\n\nIt’s not enough to say something is quick and easy. 
To have a better understanding of some of the benefits of using [GitLab CI/CD](/topics/ci-cd/), it’s much better to _show_ you.\n\nIn a [short video](https://www.youtube.com/watch?v=6207TKNGgJs&feature=emb_logo), [Itzik Gan-Baruch](/company/team/#iganbaruch) technical marketing manager, demonstrates how to submit a code change using GitLab Web IDE. In three minutes, teams can submit a code change and commit it, trigger a CI pipeline to scan for any errors, and ship the updated application to users.\n\n## Getting started with GitLab Web IDE\n\nAll code that gets automatically tested and deployed to production has a human at its source. In GitLab 10.7, we released the [first iteration of our Web Integrated Development Environment (IDE)](/blog/introducing-gitlab-s-integrated-development-environment/) after observing how non-developers struggled with editing multiple files and committing those changes. Since we believe that [everyone can contribute](/company/mission/#mission), building an editor that was integrated with GitLab that made it easier for anyone to contribute seemed like a natural fit. To access the Web IDE, just click the button from any GitLab project.\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_1.png){: .shadow.medium.center}\n\nThe Web IDE button\n{: .note.text-center}\n\nIn this simple project with a job application, you can use the Web IDE to make a code change and push it to a feature branch. Select the file you would like to change from the menu on the left.\n\n![Selecting a file](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_2.png){: .shadow.medium.center}\n\nSelecting a file from the Web IDE\n{: .note.text-center}\n\nOnce you’ve modified the text in that file, add a commit message and create a new branch. 
Click `Commit` to create a merge request.\n\n![Commit](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_3.png){: .shadow.medium.center}\n\nCommit to create a merge request\n{: .note.text-center}\n\nYour commit generates a merge request, and from here you can add an assignee, tie this code change to a specific milestone, add labels, or add any additional information regarding the change.\n\n![Modify merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_4.png){: .shadow.medium.center}\n\nSubmit merge request\n{: .note.text-center}\n\nA new [continuous integration pipeline](/solutions/continuous-integration/) is triggered automatically. Click on the pipeline to see the stages.\n\n![Pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_5.png){: .shadow.medium.center}\n\nClick on the pipeline from the merge request\n{: .note.text-center}\n\nIn this project, the pipeline needed zero-configuration because it was generated through GitLab's [Auto DevOps](/direction/delivery/auto_devops/) capability. The pipeline has stages and a few jobs within each stage.\n\n![Auto DevOps pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_6.png){: .shadow.medium.center}\n\nA CI pipeline automatically configured with GitLab Auto DevOps\n{: .note.text-center}\n\nFirst, it builds a Docker image for the code and pushes it to the container registry. 
From there, it begins tests and scans jobs that run in parallel to help speed up the pipeline.\n\n![Pipeline jobs](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_7.png){: .shadow.medium.center}\n\nClick on a job within the pipeline stage to get more information\n{: .note.text-center}\n\nBy clicking on a job within the stage, you can see what happens.\n\n![dependency scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_8.png){: .shadow.medium.center}\n\nDependency scanning details\n{: .note.text-center}\n\nOnce all tests are completed, all test results will be added to the merge request that was created. The merge request is really the key to using GitLab as a code collaboration and [version control platform](/topics/version-control/). It’s simply a request to merge one branch into another.\n\n![merge requests](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_9.png){: .shadow.medium.center}\n\nMerge requests for this project\n{: .note.text-center}\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) are a way to visualize the changes that were made. Click `View app` once the pipeline has completed to access the staging environment.\n\n![Review apps](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_10.png){: .shadow.medium.center}\n\nSelect `View app` to access a staging environment once a pipeline completes.\n{: .note.text-center}\n\nIn this environment, only changes that were made in the merge request will be displayed. This link can be sent to others so they can view the changes from a web browser.\n\n![staging environment](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_12.png){: .shadow.medium.center}\n\nThe Review App for this project\n{: .note.text-center}\n\nFrom the merge request, you can see the test results, including changes to code quality and the security scans. This scan detected 20 new vulnerabilities. 
If you’d like more information, just click `Expand` on the right.\n\n![pipeline test results](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_13.png){: .shadow.medium.center}\n\nPipeline test results\n{: .note.text-center}\n\nOnce the results have been expanded, you can click on each one to get more details.\n\n![SAST scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_14.png){: .shadow.medium.center}\n\nSAST vulnerabilities detected\n{: .note.text-center}\n\nBy clicking on one of these results, you can see the file that caused the vulnerability as well as the problematic lines of code.\n\n![security report](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_15.png){: .shadow.medium.center}\n\nSecurity report\n{: .note.text-center}\n\nFrom this menu, you can choose to dismiss the vulnerability or create an issue so that someone can fix it. Details from the test will be added to the issue automatically.\n\n![new issue](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_16.png){: .shadow.medium.center}\n\nA new issue created to investigate a vulnerability\n{: .note.text-center}\n\nFrom your original merge request, you can collaborate with others and have them take a look at the proposed changes.\n\n![collaborate on merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_17.png){: .shadow.medium.center}\n\nTag someone in a merge request to have them see your changes\n{: .note.text-center}\n\nOnce you’ve gathered feedback and all pipelines have passed, click the `merge` button to trigger a new pipeline to deploy your application to production\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_18.png){: .shadow.medium.center}\n\nClick `merge` to trigger a deployment pipeline\n{: .note.text-center}\n\nThis workflow shows how anyone can contribute code without using a command line. 
The Web IDE makes it easy for anyone to make changes without introducing additional risks or quality issues, all from the GitLab interface.\n\nTo see this three-minute demo in real-time, just watch the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/6207TKNGgJs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,875,917],{"slug":5268,"featured":6,"template":700},"using-gitlab-web-ide-gitlab-ci-cd","content:en-us:blog:using-gitlab-web-ide-gitlab-ci-cd.yml","Using Gitlab Web Ide Gitlab Ci Cd","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd.yml","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"_path":5274,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5275,"content":5281,"config":5286,"_id":5288,"_type":14,"title":5289,"_source":16,"_file":5290,"_stem":5291,"_extension":19},"/en-us/blog/vault-integration-process",{"title":5276,"description":5277,"ogTitle":5276,"ogDescription":5277,"noIndex":6,"ogImage":5278,"ogUrl":5279,"ogSiteName":685,"ogType":686,"canonicalUrls":5279,"schema":5280},"How we’ll simplify Vault access for GitLab CI/CD users","CEO Sid Sijbrandij and senior product manager Thao Yeager discuss the easiest way to bring Vault access to GitLab customers. 
Hint: it involves a minimum viable change.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681067/Blog/Hero%20Images/vaultintegration.jpg","https://about.gitlab.com/blog/vault-integration-process","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we’ll simplify Vault access for GitLab CI/CD users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-02-13\",\n      }",{"title":5276,"description":5277,"authors":5282,"heroImage":5278,"date":5283,"body":5284,"category":1062,"tags":5285},[1037],"2020-02-13","\n\nIn the increasingly complex and secret-filled world of software development, companies must decide how to simplify access to the secure data necessary to [run CI/CD](/topics/ci-cd/) jobs. [HashiCorp’s Vault](https://www.vaultproject.io/docs/what-is-vault/) is a solution many organizations have chosen to manage their secret storage, but what’s the best way to request Vault tokens without slowing down software development?\n\nGitLab CEO [Sid Sijbrandij](/company/team/#sytses) recently sat down to speak with [Thao Yeager](/company/team/#thaoyeager), senior product manager, Verify: Templates and [Jackie Meshell](/company/team/#jmeshell), senior product manager,  to discuss how our customers using Vault could best get access to their stored variables within GitLab. On the table were two options that would provide a [“minimum viable change” or MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) – a key part of our [core value of iteration](/blog/power-of-iteration/) and a strategy we believe enables us to move more quickly.\n\nThe two options on the table were to either use [GitLab Runner](https://docs.gitlab.com/runner/) to fetch tokens that are stored in Vault or to do a [Rails integration](/blog/why-we-use-rails-to-build-gitlab/) with Vault. 
\n\n## Rotate if you can't hide\n\nIn earlier days, Sid recalls “secrets management” used to be about making sure people simply didn’t find them out. That’s not practical any longer. “It’s super hard never to push a secret in your repository and have it end up somewhere,” he says. “It’s almost impossible. It ends up in the logs. They radiate everywhere.”\n\nToday’s secret management involves rotating and updating secrets as often as necessary. It can be tricky to put all the pieces together.\n\nIt starts with Vault, which Sid sees as just another data store, like a database but one just focused on secrets. “We should use Vault for secrets because it’s starting to become the standard for that,” he says. But we can also no longer assume that secrets will always remain safe, and thus it’s key to be able to rotate them. “At some point you’re going to have a breach and they’re going to find out all your secrets,” he says. Rotation is a way “secrets can get back to a state where they’re secrets again,” Sid says, adding that Vault is very good for rotation and access control.\n\n> Secrets management used to be about making sure people simply didn’t find them out. That’s not practical any longer.\n\nIt’s clear to Sid that GitLab’s super sensitive data should be separated into Vault. “We can better secure (Vault). For example, we can [rate limit](https://docs.gitlab.com/ee/security/rate_limits.html). If we’re going to rate limit our Rails app calls to the database that would involve dealing with an enormous amount of traffic. If we just have the secrets in Vault, it’s a much more limited set of traffic.” One other advantage: Vault has way less surface area than our Rails app or our database, Sid explains. “(Vault) is complex to run but the surface area is smaller.”\n\n## A simpler solution\n\nWith Vault on the backend holding the secrets, Sid thinks a simple runner – instructed by Rails – is the right MVC to move this project forward. 
It was not necessary to do the more complicated Rails integration. “All of the logic is in the Rails app and then it sends it off to the runner with ‘Ok, run the script.’” This solution keeps the complexity in the Rails app, which works because “Ruby is great for all that complexity,” Sid says. The runner can be something simple that lives on multiple platforms. “The last thing we want is the runner to have more dependencies and more complexity.”\n\nUltimately Sid expects customers will want to use their own Vault installations, but for now that reality is complicated. Integrating GitLab with Vault is the more straightforward solution for the time being. And it’s certainly the safest: Vault won’t give out the same credential twice and the credentials have a very short life span, two things that will make a breach less dangerous, Sid says. “You will never have another secret that can’t be rotated,” Sid says. “Every secret is able to be rotated so you can always push that button. That's the future we're working towards and we should make that future easier for our users to adopt.”\n\nWatch the entire video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9kD3geEmSJ8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Chris Barbalis](https://unsplash.com/@cbarbalis) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,697,232],{"slug":5287,"featured":6,"template":700},"vault-integration-process","content:en-us:blog:vault-integration-process.yml","Vault Integration 
Process","en-us/blog/vault-integration-process.yml","en-us/blog/vault-integration-process",{"_path":5293,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5294,"content":5300,"config":5307,"_id":5309,"_type":14,"title":5310,"_source":16,"_file":5311,"_stem":5312,"_extension":19},"/en-us/blog/vcc-with-a-single-app",{"title":5295,"description":5296,"ogTitle":5295,"ogDescription":5296,"noIndex":6,"ogImage":5297,"ogUrl":5298,"ogSiteName":685,"ogType":686,"canonicalUrls":5298,"schema":5299},"End-to-end DevOps with version control & collaboration","Version Control & Collaboration is centered at the core of your end-to-end DevOps single application needs","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681652/Blog/Hero%20Images/markus-spiske-MkwAXj8LV8c-unsplash.png","https://about.gitlab.com/blog/vcc-with-a-single-app","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A single application for your end-to-end DevOps needs starts with Version Control & Collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2020-10-07\",\n      }",{"title":5301,"description":5296,"authors":5302,"heroImage":5297,"date":5304,"body":5305,"category":978,"tags":5306},"A single application for your end-to-end DevOps needs starts with Version Control & Collaboration",[5303],"Tye Davis","2020-10-07","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n[Source code](/solutions/source-code-management/) acts as a single source of truth and a collection of a product’s knowledge, history, and solutions. [Version control](/topics/version-control/) serves as a safety net to protect the source code from irreparable harm, giving the development team the freedom to experiment without fear of causing damage or creating code conflicts. 
If developers code concurrently and create incompatible changes, version control identifies the problem areas so that team members can quickly revert changes to a previous version, compare changes, or identify who committed the problem code through the revision history. \n\nWith [version control systems](/topics/version-control/), a software team can solve an issue before progressing further into a project. Through code reviews, software teams can analyze earlier versions to understand how a solution evolved. With source code as a starting point to GitLab being a single source of truth, the capabilities expand beyond version control systems that does everything from project planning and source code management, to CI/CD, monitoring, and security.\n\nGitLab enables portfolio planning and management through epics, groups (programs) iterations and milestones to organize and track progress. Regardless of your methodology from Waterfall to DevOps, GitLab’s simple and flexible approach to planning meets the needs of small teams to large enterprises. GitLab helps teams organize, plan, align and track project work to ensure teams are working on the right things at the right time and maintain end to end visibility and traceability of issues throughout the delivery lifecycle from idea to production.\n\nGitLab helps teams design, develop and securely manage code and project data from a single distributed version control system to enable rapid iteration and delivery of business value. GitLab repositories provide a scalable, single source of truth for collaborating on projects and code which enables teams to be productive without disrupting their workflows.\n\nGitLab helps delivery teams fully embrace continuous integration to automate the builds, integration and verification of their code. 
GitLab’s industry leading CI capabilities enables automated testing, Static Analysis Security Testing, Dynamic Analysis Security testing and code quality analysis to provide fast feedback to developers and testers about the quality of their code. With pipelines that enable concurrent testing and parallel execution, teams quickly get insight about every commit, allowing them to deliver higher quality code faster.\n\nGitLab enables teams to package their applications and dependencies, manage containers, and build artifacts with ease. The private, secure container registry and artifact repositories are built-in and preconfigured out-of-the box to work seamlessly with GitLab source code management and CI/CD pipelines. Ensure [DevOps acceleration with automated software](/topics/devops/) pipelines that flow freely without interruption.\n\nGitLab allows for security practices to be enabled at the creation phases of the project, increasing resilience to external attacks, internal threats and ability to resume activity promptly. GitLab incorporates several security capabilities to make development more secure even before build happens. They can detect secrets and license compliance issues, find vulnerabilities in source code or in its dependencies before merging, find vulnerabilities in running apps before merging, as well as containers. These capabilities can even reproduce what thousands of users input in seconds to detect faults and other issues with Fuzz testing. \n\nWith zero-touch Continuous Delivery (CD) built right into the pipeline, deployments can be automated to multiple environments like staging and production, and the system just knows what to do without being told - even for more advanced patterns like canary deployments. 
With feature flags, built-in auditing/traceability, on-demand environments, and GitLab pages for static content delivery, you'll be able to deliver faster and with more confidence than ever before.\n\nGitLab helps teams to configure and manage their application environments. Strong integration to Kubernetes reduces the effort needed to define and configure the infrastructure required to support your application. Protect access to key infrastructure configuration details such as passwords and login information by using ‘secret variables’ to limit access to only authorized users and processes. GitLab looks to provide feedback that decreases the frequency and severity of incidents and improves operational and product performance.\n\nSource code management is more than simply tracking changes, versions, and branches of code.  The ability to connect every phase of the Software Development Lifecycle in one [single DevOps platform](/solutions/devops-platform/) via one data layer, one design system, one set of commands to manage all the different stages of software design, development, build and deployment gives an organization a resource like no other. 
\n\nWatch this short video (3 minutes) to learn how [VC&C Single App](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) on GitLab.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/SAfpdJ7jpHQ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Markus Spiske](https://unsplash.com/@markusspiske) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n",[9,697,999],{"slug":5308,"featured":6,"template":700},"vcc-with-a-single-app","content:en-us:blog:vcc-with-a-single-app.yml","Vcc With A Single App","en-us/blog/vcc-with-a-single-app.yml","en-us/blog/vcc-with-a-single-app",{"_path":5314,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5315,"content":5321,"config":5328,"_id":5330,"_type":14,"title":5331,"_source":16,"_file":5332,"_stem":5333,"_extension":19},"/en-us/blog/verizon-customer-story",{"title":5316,"description":5317,"ogTitle":5316,"ogDescription":5317,"noIndex":6,"ogImage":5318,"ogUrl":5319,"ogSiteName":685,"ogType":686,"canonicalUrls":5319,"schema":5320},"Verizon cuts datacenter rebuilds from 30 days to 8 hours","Verizon utilized microservices, automation, and GitLab to reduce datacenter rebuilds to under 8 hours.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678933/Blog/Hero%20Images/verizon_video_blog.jpg","https://about.gitlab.com/blog/verizon-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kim Lock\"}],\n        \"datePublished\": \"2019-02-14\",\n      }",{"title":5322,"description":5317,"authors":5323,"heroImage":5318,"date":5325,"body":5326,"category":783,"tags":5327},"How Verizon Connect reduced datacenter rebuilds from 
30 days to under 8 hours with GitLab",[5324],"Kim Lock","2019-02-14","\nIn 2016, the [Verizon Connect](https://www.verizonconnect.com/) Telematics Container Cloud Platform team was struggling with data center\nbuilds that took 30 days. Working with legacy systems that included Java-based, monolithic\napplications, they also had a variety of disparate tools including BitBucket, Jenkins, and Jira\nin use throughout their environment.\n\n### Starting from scratch to move to microservices and increase automation\n\nThe group looked to move to a [microservices architecture](/blog/strategies-microservices-architecture/) to improve deploy speed and increase\nautomation. They also wanted to overcome manual errors, disjointed processes, and\nmanual deploys. \"We were just spending too much time doing stuff manually, so we decided\nto just start fresh and write everything from scratch,\" says Mohammed Mehdi, Principal DevOps, Verizon.\n\nAs they created this new infrastructure, they kept four key components in mind: architecture,\nautomation, extensibility, and being proactive and prepared for the future. They wanted to rebuild\ntheir data centers in less than 12 hours, instead of 30 days. They had a goal of 100 percent CI/CD.\nThey wanted to remove manual deployments, especially around the server and network deployments.\nThe team also focused on avoiding vendor lock-in by seeking open source tools to help them accomplish these goals.\n\nThe team looked to improve automation by focusing on simplification, standardization, and providing end-to-end visibility.\n\"We wanted easily repeatable, with zero-touch, zero-downtime deployments, automated tracking,\" Mehdi explains.\n\n### A single solution to meet their needs\n\nThe team chose GitLab to support this infrastructure initiative because it met a number of their qualifications, including being open source and offering Windows support. 
The team liked that it is easy to use and the UI easy to understand.\n\n\"Some of the other features that we really loved, and we didn’t find with any other CI/CD tool, are the project management\nfeatures,\" Mehdi says. \"GitLab replaced a bunch of disparate systems for us like Jira, BitBucket, and Jenkins. GitLab\nprovided us with a one-stop solution.\"\n\nThe Verizon Connect Telematics Container Cloud Platform team is using GitLab for:\n\n- [Code review](/blog/demo-mastering-code-review-with-gitlab/)\n- [CI/CD](/solutions/continuous-integration/)\n- [Issue tracking](/pricing/feature-comparison/)\n- [Source Code Management](/solutions/source-code-management/)\n- [Audit Management](https://docs.gitlab.com/ee/administration/audit_events.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nThe team has successfully achieved deployment flexibility and are platform agnostic. They now have\nstreamlined processes and developers can truly focus on differentiating tasks.\n\nThe team was able to reduce their complete datacenter deploy\nprocess to under eight hours because of the streamlined deploy and build processes\nthey enabled using GitLab. 
Learn how [Verizon Connect](https://www.verizonconnect.com/) is achieving this success by watching\nmore about their story and how they achieved their targets in [the YouTube video](https://youtu.be/zxMFaw5j6Zs) below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zxMFaw5j6Zs\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThanks for giving GitLab a shot, Verizon Connect!\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap) on [Unsplash](https://unsplash.com)\n{: .note}\n",[720,9,721,763,875],{"slug":5329,"featured":6,"template":700},"verizon-customer-story","content:en-us:blog:verizon-customer-story.yml","Verizon Customer Story","en-us/blog/verizon-customer-story.yml","en-us/blog/verizon-customer-story",{"_path":5335,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5336,"content":5341,"config":5347,"_id":5349,"_type":14,"title":5350,"_source":16,"_file":5351,"_stem":5352,"_extension":19},"/en-us/blog/vscode-extension-development-with-gitlab",{"title":5337,"description":5338,"ogTitle":5337,"ogDescription":5338,"noIndex":6,"ogImage":3199,"ogUrl":5339,"ogSiteName":685,"ogType":686,"canonicalUrls":5339,"schema":5340},"VS Code extension development with GitLab","As VS Code editor increases in popularity, find out how GitLab + VS Code can be used for extension development and how we develop the official GitLab VS Code extension.","https://about.gitlab.com/blog/vscode-extension-development-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"VS Code extension development with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2020-11-30\",\n      
}",{"title":5337,"description":5338,"authors":5342,"heroImage":3199,"date":5344,"body":5345,"category":718,"tags":5346},[5343],"Tomas Vik","2020-11-30","\n## What is Visual Studio Code (VSC)?\n\nMicrosoft Visual Studio Code (VS Code) is an extensible text editor. It's implemented in TypeScript and runs on Node 12 and Electron. It was [first released in 2015](https://github.com/microsoft/vscode/releases/tag/0.10.1), and since then, become widely popular[^2]. This post explains the basics about the development of VS Code extensions, shows how you can use GitLab for extension development, and shares how we build the official [GitLab VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\n## VS Code editor key features\n\nFor me, the key feature of the VS Code editor is that it created a platform for extensions. That means not just providing an API for extensions (which editors have done since the '90s [^3]) but also providing a marketplace and seamless way of publishing and updating extensions.\n\nThere is also a fully open source version of the VS Code called [VSCodium](https://vscodium.com/). This version removes some proprietary Microsoft code from the distribution and is analogous to the Google Chrome and Chromium projects.\n\n## VS Code extension\n\nVS Code extension is a JavaScript or TypeScript app that runs in node and has access to the [VS Code Extension API](https://code.visualstudio.com/api). The convenient thing about this architecture is that the extension is like any other node app and has full access to the host machine and network. It can choose its own library for network connection, manipulating file systems, and also for rendering web UI.\n\n## Extension API\n\nThe extension API is implemented in TypeScript; it allows users to manipulate almost every aspect of the editor. 
After months of using it, I find the design elegant (with the exception of testing, which seems to be an afterthought in many areas of the API).\n\nThe main features of the API are manipulating and searching the files, editing text, creating custom left panels and status bars, debuggers, custom webview tabs, (Jupyter) notebook providers, and more. The API also provides a simple way to communicate with the user via input fields and quick-pick panels, as well as showing output with info, warning, or error messages.\n\n## Extension Marketplace\n\nIf you are familiar with either AppStore or PlayStore, you'll find VS Code has an equivalent store called [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode), and unlike on its older siblings, everything[^4] is for free. Both the easy browsing experience for the user and the ease of use for a developer are differentiators for VS Code.\n\nAs a developer, you set up your [Azure Cloud token](https://code.visualstudio.com/api/working-with-extensions/publishing-extension#get-a-personal-access-token) and then run `vsce publish` in your extension folder. That's it, within a few minutes, most of your users[^5] are running the latest and greatest version of your extension. This process greatly reduces the pressure on developers to get everything right before releasing, enabling faster iteration.\n\nThere is also an independent marketplace called [open-vsx](https://open-vsx.org/) used mainly by VSCodium but also by [GitPod](https://docs.gitlab.com/ee/integration/gitpod.html) and others.\n\n## Developing extensions in GitLab\n\nIf you'd like to try and develop your own extension, you can fork the [`gitlab-example-extension`](https://gitlab.com/viktomas/gitlab-example-extension) project. 
It contains a complete setup for linting, unit and integration testing, and publishing the extension to both [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode) and [open-vsx](https://open-vsx.org/). Thanks to GitLab being a single platform for the whole [DevOps lifecycle](/topics/devops/), you can just push your code changes to GitLab, and CI/CD takes care of everything else. As always, if you find any useful tweaks, please submit an MR because [everyone can contribute](/company/mission/#mission).\n\nYou can see what the VS Code extension API offers in the [official documentation](https://code.visualstudio.com/api). You can then have a look at [extension examples](https://code.visualstudio.com/api/extension-guides/overview) and extend them to make the VS Code editor do almost anything you want.\n\n## Our extension: GitLab Workflow\n\nIn June the [GitLab Workflow extension became officially supported by GitLab](/blog/use-gitlab-with-vscode/). Since then we've done a lot of cleanup work and bug fixes. Recently, we released our first larger feature: [Inserting GitLab project snippets](https://about.gitlab.com/releases/2020/11/22/gitlab-13-6-released/#insert-gitlab-snippets-directly-in-vs-code).\n\nThe primary purpose of the extension is to integrate GitLab features into the editor, so users don't have to leave the editor to perform basic tasks such as read an issue description or create a snippet from the code. The extension is trying to plug in the GitLab features into an existing VS Code Extension API to both minimise the need for custom code and to make the experience as VS Code-like as possible.\n\nThere are several main areas of the VS Code Extension API that we take advantage of:\n\n### Commands\n\n[Commands](https://code.visualstudio.com/api/extension-guides/command) are a versatile concept for triggering actions. 
The most common way to trigger commands is to use the \u003Ckbd>Cmd\u003C/kbd>+\u003Ckbd>Shift\u003C/kbd>+\u003Ckbd>P\u003C/kbd> Command Palette. But commands can also be triggered from context menus, clicks on buttons, or even programmatically by other code in the extension. The most common example of triggering commands programatically is to call the `vscode.open` command with a URL as a parameter. GitLab workflow does that every time we open the GitLab web page[^6].\n\n![Command Palette](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/commands.png){: .shadow.medium.center}\nCommand Palette in GitLab Workflow\n{: .note .text-center}\n\n### Tree View\n\nVS Code uses the [Tree View](https://code.visualstudio.com/api/extension-guides/tree-view) for displaying the left panel. The panel shows the file tree for the project, changed Git files, an outline of the open file, full-text search results, and more. We use this Tree View panel to show lists of issues and merge requests.\n\n![Tree View](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/tree-view.png){: .shadow.medium.center}\nTree View in GitLab Workflow\n{: .note .text-center}\n\n### Status bar\n\n[Status bar](https://code.visualstudio.com/api/extension-capabilities/extending-workbench#status-bar-item) is the slim panel at the bottom of the editor. Any extension can add items to it. Extensions such as Git, spell checks, linters, and formatters all add items to the status bar to provide the user with quick feedback.\n\nThe GitLab Workflow extension shows the MR, issue, and pipeline for the current branch. 
It, for example, allows you to see if your pipeline failed after the last push.\n\n![Status bar](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/status-bar.png){: .shadow.medium.center}\nStatus bar in GitLab Workflow\n{: .note .text-center}\n\nAltogether the VS Code API provides a great foundation for bringing GitLab features closer to the editor. The GitLab VS Code extension is an exciting project that **you too can contribute to**. The best place to start is the [GitLab project page](https://gitlab.com/gitlab-org/gitlab-vscode-extension).\n\n[^2]: [17th most popular project on GitHub](https://github.com/search?p=2&q=stars%3A%3E100&s=stars&type=Repositories) at the time of writing (2020-11-20)\n[^3]: GNU Emacs supported Lisp extensions in [1985](https://en.wikipedia.org/wiki/Emacs#GNU_Emacs)\n[^4]: I haven't been able to find a paid extension in the store.\n[^5]: The auto-update feature is on by default in VS Code, but it can be turned off in which case your users are not going to auto-update.\n[^6]: [Using `vscode.open` in the GitLab Workflow](https://gitlab.com/search?utf8=%E2%9C%93&search=vscode.open&group_id=9970&project_id=5261717&scope=&search_code=true&snippets=false&repository_ref=main&nav_source=navbar)\n\n[Cover image](https://art.ljubicapetkovic.com/cc-licensed/) by [Ljubica Petkovic](https://art.ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[232,917,9],{"slug":5348,"featured":6,"template":700},"vscode-extension-development-with-gitlab","content:en-us:blog:vscode-extension-development-with-gitlab.yml","Vscode Extension Development With 
Gitlab","en-us/blog/vscode-extension-development-with-gitlab.yml","en-us/blog/vscode-extension-development-with-gitlab",{"_path":5354,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5355,"content":5361,"config":5367,"_id":5369,"_type":14,"title":5370,"_source":16,"_file":5371,"_stem":5372,"_extension":19},"/en-us/blog/vuejs-app-gitlab",{"title":5356,"description":5357,"ogTitle":5356,"ogDescription":5357,"noIndex":6,"ogImage":5358,"ogUrl":5359,"ogSiteName":685,"ogType":686,"canonicalUrls":5359,"schema":5360},"How to use GitLab CI/CD for Vue.js","Learn how to get the most out of GitLab CI/CD with this guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680363/Blog/Hero%20Images/build-test-deploy-vue.jpg","https://about.gitlab.com/blog/vuejs-app-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI/CD for Vue.js\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Simon Tarchichi\"}],\n        \"datePublished\": \"2017-09-12\",\n      }",{"title":5356,"description":5357,"authors":5362,"heroImage":5358,"date":5364,"body":5365,"category":718,"tags":5366},[5363],"Simon Tarchichi","2017-09-12","Continuous Integration allows you to:\n\n\n- Deploy your app instantly, when new code is pushed into a repo\n\n- Build your app (in our case `npm run build`)\n\n- Trigger test scripts (and block deployment if a test fails)\n\n\nIt is definitely worth the effort if you update your app regularly.\n\n\nGitLab is a service that started as an open-source GitHub competitor, mostly\nto host code in Git repositories, and evolved into an amazing tool that I\nwon’t introduce here, as it isn’t related to Vue.js. One thing though, they\nwere one of the first major companies to use Vue.js for their user\ninterface.\n\n\nDocker has to be mentioned as well. It is the most popular containerization\nservice. 
It basically means you get to execute code in a secure environment,\nconfigured exactly like your dev/prod. Very useful when you need to make\nsure your code is executed with all its dependencies.\n\n\nEach of these tools would require many posts to be covered. We’ll focus on\nsetting up [CI/CD](/topics/ci-cd/) for your Vue.js project. We’ll assume you\nhave no knowledge in the matter.\n\n\n[GitLab CI/CD is free for personal projects](/pricing/#gitlab-com), I don’t\nknow any other tool with such a beautiful UI that does that. If you do,\nplease let me know.\n\n\n### The .gitlab-ci.yml file\n\n\nCreate a `.gitlab-ci.yml` file at the root of your repo. GitLab will check\nfor this file when new code is pushed. If the file is present, it will\ndefine a [pipeline](https://docs.gitlab.com/ee/ci/pipelines/index.html),\nexecuted by a [GitLab Runner](http://docs.gitlab.com/runner/). Click the\nlinks if you are curious, or keep reading to see a working example.\n\n\nDefault stages of a pipeline are:\n\n\n1. build\n\n1. test\n\n1. 
deploy\n\n\nAgain, you don’t need to master this, but this is the most common use case.\nYou may not have set up unit tests, and if you haven’t, you may remove this\nstep from the file, GitLab won’t mind.\n\n\nHere is our file, you may copy/paste it in your repo:\n\n\n```\n\nbuild site:\n  image: node:6\n  stage: build\n  script:\n    - npm install --progress=false\n    - npm run build\n  artifacts:\n    expire_in: 1 week\n    paths:\n      - dist\n\nunit test:\n  image: node:6\n  stage: test\n  script:\n    - npm install --progress=false\n    - npm run unit\n\ndeploy:\n  image: alpine\n  stage: deploy\n  script:\n    - apk add --no-cache rsync openssh\n    - mkdir -p ~/.ssh\n    - echo \"$SSH_PRIVATE_KEY\" >> ~/.ssh/id_dsa\n    - chmod 600 ~/.ssh/id_dsa\n    - echo -e \"Host *\\n\\tStrictHostKeyChecking no\\n\\n\" > ~/.ssh/config\n    - rsync -rav --delete dist/ user@server.com:/your/project/path/\n  ```\n\n### Test our file\n\n\nNow commit and push the `.gitlab-ci.yml` file to your GitLab repo.\n\n\nHere is how it will look in the Pipelines tab of GitLab UI:\n\n\n![GitLab CI/CD\nPipelines](https://about.gitlab.com/images/blogimages/gitlab-ci-pipelines.png){:\n.shadow}\u003Cbr>\n\n\nThe green checkmark indicates that the step has succeeded and you can see\nthe logs when clicking it.\n\n\nIn the second example, the tests have failed, click the red mark to read the\nlogs and understand what went wrong.\n\n\n![GitLab CI/CD\nlogs](https://about.gitlab.com/images/blogimages/gitlab-ci-failed.png){:\n.shadow}\u003Cbr>\n\n\n### File anatomy\n\n\n- `image` is the link to the Docker image. I have chosen to use public\nofficial images, but you may use one from the Docker Hub or a private\nregistry.\n\n\n- `stage` should be `build`, `test` or `deploy` if you use defaults. But\nthat [can be customized](https://docs.gitlab.com/ee/ci/yaml/stages).\n\n\n- `script` are command lines executed inside our build environment.\n\n\n- `artifacts` describes a path to the build result. 
The files in this path\ncan be used in the next build steps (in `deploy` in our example). You can\ndownload artifacts from Gitlab UI.\n\n\nMore about the `.gitlab-ci.yml` file options [in the\ndocs](https://docs.gitlab.com/ee/ci/yaml/).\n\n\n### About the deployment script\n\n\nI have described my use case here, but it may not be the simplest. Relevant\nexamples for [deployment to Amazon\nS3](/blog/ci-deployment-and-environments/) or other services can\nbe found online.\n\n\nTo get it working, you’ll need to **provide GitLab with a private SSH key**.\nIf you are no security expert, then it is time to take advice from one. The\nbottom line is **do not give it your private SSH key**, create one that is\nused only by GitLab.\n\n\n```\n\n# create gitlab user\n\nadduser gitlab\n\n\n# generate a DSA SSH key\n\nsu -l gitlab\n\nssh-keygen -t dsa\n\n\n# authorize the key to log in using the public key and output the private\none\n\ncd .ssh\n\nmv id_dsa.pub authorized_keys\n\ncat id_dsa && rm id_dsa\n\n```\n\n\nThen go to GitLab UI “Settings” (the gear icon), then “Variables” and\ncopy/paste the content of your terminal in “Value”. The “Key” should be\n`SSH_PRIVATE_KEY`. 
This private key will be used to do the `rsync`.\n\n\n![GitLab CI/CD\nvariables](https://about.gitlab.com/images/blogimages/gitlab-ci-variables.png){:\n.shadow}\u003Cbr>\n\n\n## Links\n\n\n- [Sample GitLab repository](https://gitlab.com/kartsims/vue-ci)\n\n- [Gitlab CI/CD docs](https://docs.gitlab.com/ee/ci/)\n\n\nIf you need more information, leave a comment I’ll be happy to help you if I\ncan.\n\n\n\"[Golden Gate Bridge Vista\nPoint](https://unsplash.com/@tigesphotos?photo=-BiEu8VP9-M)\" by [Tiger\nRobinson](https://unsplash.com/@tigesphotos) on Unsplash\n\n{: .note}\n",[9,763],{"slug":5368,"featured":6,"template":700},"vuejs-app-gitlab","content:en-us:blog:vuejs-app-gitlab.yml","Vuejs App Gitlab","en-us/blog/vuejs-app-gitlab.yml","en-us/blog/vuejs-app-gitlab",{"_path":5374,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5375,"content":5381,"config":5388,"_id":5390,"_type":14,"title":5391,"_source":16,"_file":5392,"_stem":5393,"_extension":19},"/en-us/blog/welcoming-opencores-to-gitlab",{"title":5376,"description":5377,"ogTitle":5376,"ogDescription":5377,"noIndex":6,"ogImage":5378,"ogUrl":5379,"ogSiteName":685,"ogType":686,"canonicalUrls":5379,"schema":5380},"OpenCores come to GitLab","OpenCores moves to GitLab to accelerate digital design flow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669768/Blog/Hero%20Images/gitlab-opencores-oliscience.jpg","https://about.gitlab.com/blog/welcoming-opencores-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"OpenCores come to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrea Borga\"},{\"@type\":\"Person\",\"name\":\"David Planella\"}],\n        \"datePublished\": \"2019-12-03\",\n      }",{"title":5376,"description":5377,"authors":5382,"heroImage":5378,"date":5385,"body":5386,"category":300,"tags":5387},[5383,5384],"Andrea Borga","David 
Planella","2019-12-03","\n\n[OpenCores](https://opencores.org), the largest and most popular Gateware\ndevelopment community with over [300K members and 1200+\nprojects](https://opencores.org/about/statistics), is moving to GitLab. This\nis excellent news all around: as a catalyst for accelerating IP core development\nand for introducing GitLab to innovative uses in the scientific and electronic\ndesign communities.\n\n## Gateware and OpenCores\n\n![Gateware design flow diagram](https://about.gitlab.com/images/blogimages/welcoming-opencores-to-gitlab/gateware_flow.png \"Gateware flow\")\n\nOpenCores is a repository of reusable units of logic, open to use as building\nblocks for the electronics design community. These units are most commonly known as\n[Intellectual Property (IP)\ncores](https://en.wikipedia.org/wiki/Semiconductor_intellectual_property_core),\nand are described (coded), in [Hardware Description\nLanguage](https://en.wikipedia.org/wiki/Hardware_description_language) (HDL)\nfor the most part.\n\n### What is gateware?\n\nIn the semiconductor industry, these are the basic constituents of advanced\ndigital designs, collectively known as\n**gateware**: A layer in the electronics development chain positioned in\nbetween _hardware_ (such as a Printed Circuit Board – PCB – or a packaged chip),\nand _firmware_ (a set of decoded and executed instructions for a microprocessor).\n\n### What is OpenCores?\n\nThe [OpenCores portal](https://opencores.org) hosts the source code for a\nmultitude of digital gateware projects. 
In its more than 20 years of web history, it has\nevolved into a platform that enables its user community to discover, showcase,\nand manage such projects, including revision control for [source code](/solutions/source-code-management/).\n\nThe target devices for gateware have historically been\n[FPGA](https://en.wikipedia.org/wiki/Field-programmable_gate_array) (Field\nProgrammable Gate Arrays) and\n[ASIC](https://en.wikipedia.org/wiki/Application-specific_integrated_circuit)s\n(Application Specific Integrated Circuits), which allow building a vast range\nof hardware digital electronics appliances. These are often described as\n[SoC](https://en.wikipedia.org/wiki/System_on_a_chip) (System on a Chip).\n\nIn recent years, the OpenCores portal has been particularly focused on hosting FPGA\napplications, with the intention to enlarge the pool of available cores based\non emerging hardware description methods, such as\n[HLS](https://en.wikipedia.org/wiki/High-level_synthesis) (High-level\nsynthesis).\n\nOpenCores is also the place where digital designers meet to showcase, promote,\nand talk about their passion and work. They do this through forums, news feeds,\nand much more!\n\n### Who maintains the OpenCores portal?\n\n[Oliscience](http://oliscience.nl/) (open logic interconnects science) act as\nthe stewards of the OpenCores community and its portal. Oliscience is an\ninitiative originated from the\n[CERN](/customers/cern/)-Nikhef Business Incubation\nCentre (CERN-BIC@Nikhef), and is [supported](https://opencores.org/partners) by\n[Nikhef](https://www.nikhef.nl/en/), the Dutch National Institute for Subatomic\nPhysics, and [ASTRON](https://www.astron.nl/), the Netherlands Institute for\nRadio Astronomy.\n\nAs part of the stewardship charter, Oliscience is committed to maintaining and\nsupporting the OpenCores portal. 
This mission involves globally promoting its\ncommunity, fostering the use of open standards and practices, actively\ndeveloping the portal infrastructure and content, and more.\nThe [Wishbone bus](https://en.wikipedia.org/wiki/Wishbone_(computer_bus)),\nused throughout OpenCores designs, is one of the most well-known examples.\n\n## Leading change and embracing the DevOps culture for Gateware development\n\n[Moore's law](https://en.wikipedia.org/wiki/Moore%27s_law) is slowing down, and\nthe semiconductor industry is starting to experience a new resurgence. With a\nwave of new opportunities arising, FPGA is one of the key technologies that\nplay a crucial role in the future of computing architectures.\n\nThe barrier to entry for becoming a gateway developer is fairly higher than learning a new programming language as a software developer. As\nsuch, the digital electronics industry is continually striving to simplify\nthe approach to programmable logic.\n\nOpen Source IP Cores play a significant role in this goal. They unlock a\nvast knowledge pool that enables new gateware developers to start hacking on\nnew projects straight away. They can use existing solutions to draw knowledge\nvery quickly.\n\nIP Cores strive for quality, and quality calls for a structured way to assess\nthe content of a code bundle. This is where Continuous Verification (CV) comes into\nplay.\n\nIn the context of programmable logic, CV is a\nworkflow in which Gateware defined in a [HDL](https://en.wikipedia.org/wiki/Hardware_description_language)\nruns against standardized testbenches and benchmarked to assess and rank its\nquality. Full coverage for test cases and failure corner cases is guaranteed.\n\n## Accelerating digital design with GitLab\n\nThe OpenCores community leaders have strong ties to [CERN](https://home.cern/)\nand the [European Space Agency](https://www.esa.int/). 
Both are leading\nresearch organizations committed to supporting their respective scientific\ncommunities, which use GitLab for internal development.\n\nBoth organizations and the electronics industry in general are particularly\ninterested in a better assessment of the quality of gateware products, as their\nusage in industrial and commercial applications continues to increase at an\naccelerated rate. When you launch a satellite into space, you can't just press\nthe reset button if there is a bug!\n\nWhile talking to those teams, and hearing the preliminary exploration of\nimplementing CV practices into gateware design, [GitLab's integral CI/CD\nfeatures](/solutions/continuous-integration/) seemed a natural fit to pioneer the adoption of a DevOps approach to\ndigital design.\n\nSource control was also a feature that would enable engineers to share and\ncollaborate on their code in the public space. In summary, the benefits of a\nsingle application for the entire DevOps cycle, with the ultimate goal of\nreducing the gateware design cycle time made the decision easy.\n\nThe next objective for the OpenCores team is to implement a CV process in the\nOpenCores portal, starting with FPGA and until ASICs. It's an ambitious one,\nwhich requires ambitious partners.\n\nAndrea Borga, Oliscience CEO mentions:\n> we have a very strong scientific background, and we love to make experiments…\n> all the time! Exploring new ideas, and striving for impeccable execution are\n> embedded in our engineering way of thinking. You need innovative and\n> ambitious partners to achieve equally innovative and ambitious goals. This\n> is why we do what we do, and why we firmly believe GitLab's vision and spirit\n> strongly align with our own. 
This is how we chose to go with them.\n\nGitLab is thrilled to start working with the OpenCores team, to contribute to\nthat goal and welcoming them to a community that leading Open Source projects\nsuch as Drupal, GNOME, KDE, Debian, Freedesktop and many more are already a\npart of.\n\n[Cover image](https://www.flickr.com/photos/130561288@N04/39116042294/) by\n[Fritzchens Fritz](https://www.flickr.com/photos/130561288@N04/),\nlicensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[721,9,827,1062,268],{"slug":5389,"featured":6,"template":700},"welcoming-opencores-to-gitlab","content:en-us:blog:welcoming-opencores-to-gitlab.yml","Welcoming Opencores To Gitlab","en-us/blog/welcoming-opencores-to-gitlab.yml","en-us/blog/welcoming-opencores-to-gitlab",{"_path":5395,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5396,"content":5401,"config":5407,"_id":5409,"_type":14,"title":5410,"_source":16,"_file":5411,"_stem":5412,"_extension":19},"/en-us/blog/whats-next-for-gitlab-ci",{"title":5397,"description":5398,"ogTitle":5397,"ogDescription":5398,"noIndex":6,"ogImage":3219,"ogUrl":5399,"ogSiteName":685,"ogType":686,"canonicalUrls":5399,"schema":5400},"From 2/3 of Git market to next-Gen CI system & auto DevOps","GitLab first became the standard for self hosting git with two-thirds of the market, then became the next generation CI system, and the next step is creating Auto DevOps.","https://about.gitlab.com/blog/whats-next-for-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From 2/3 of the self-managed Git market, to the next-generation CI system, to Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2017-06-29\",\n      }",{"title":5402,"description":5398,"authors":5403,"heroImage":3219,"date":5404,"body":5405,"category":1040,"tags":5406},"From 2/3 of the self-managed Git 
market, to the next-generation CI system, to Auto DevOps",[1425],"2017-06-29","\n\nGitLab has transformed from offering just version control to becoming the first integrated product for DevOps. With GitLab you can go all the way from chatting about an idea to measuring it in production without spending time on configuring a bunch of tools. The version control part of GitLab is now used by 2/3 of the market that self host Git. The continuous integration (CI) part of GitLab is now the most popular next generation CI system. Today we introduce the future direction of GitLab: Auto DevOps.\n\n\u003C!-- more -->\n\nWhen we [announced our master plan in September of 2016](/blog/gitlab-master-plan/), we gave our vision for a tool that changes the way developers create software. Before the end of 2016 we [completed the master plan](/releases/2016/12/22/gitlab-8-15-released/) and introduced Auto Deploy. Auto Deploy evolved and sparked a vision for a more integrated DevOps experience. Today we have a video to present that vision of Auto DevOps.\n\n## GitLab has 2/3 market share in the self-managed Git market\n\nWith more than 100,000 organizations self-hosting GitLab, we have the largest share of companies who choose to host their own code. We’re estimated to have two-thirds of the single tenant market. 
When [Bitrise surveyed](http://blog.bitrise.io/2017/01/27/state-of-app-development-in-2016.html#self-hosted) ten thousand developers who build apps regularly on their platform, they found that 67 percent of self-managed apps prefer GitLab’s on-premise solution.\n\n![Image via Bitrise blog](https://about.gitlab.com/images/blogimages/bitrise-self-hosted-chart.png){: .shadow}\u003Cbr>\n\nSimilarly, in their survey of roughly one thousand development teams, [BuddyBuild found](https://www.buddybuild.com/blog/source-code-hosting#selfhosted) that 79% of mobile developers who host their own code have chosen GitLab:\n\n![Image via buddybuild blog](https://about.gitlab.com/images/blogimages/buddybuild-self-hosted-chart.png){: .shadow}\u003Cbr>\n\nIn their articles, both Bitrise and BuddyBuild note that few organizations use self-managed instances. We think there is a selection effect since both of them are SaaS-only offerings. Based on our experience, in large organizations (over 750 people), it is still more common to self host your Git server (frequently on a cloud service like AWS or GCP) than to use a SaaS service.\n\n## GitLab CI is the most popular next-generation CI system\n\nOur commitment to seamless integration extends to CI. Integrated [CI/CD](/topics/ci-cd/) is both more time and resource efficient than a set of distinct tools, and allows developers greater control over their build pipeline, so they can spot issues early and address them at a relatively low cost. Tighter integration between different stages of the development process makes it easier to cross-reference code, tests, and deployments while discussing them, allowing you to see the full context and iterate much more rapidly. 
We've heard from customers like [Ticketmaster](/blog/continuous-integration-ticketmaster/) that adopting GitLab CI can transform the entire software development lifecycle (SDLC), in their case helping the Ticketmaster mobile development team deliver on the longstanding goal of weekly releases. As more and more companies look to embrace CI as part of their development methodology, having CI fully integrated into their overall SDLC solution will ensure these companies are able to realize the full potential of CI. You can read more about the benefits of integrated CI in our white paper, [Scaling Continuous Integration](http://get.gitlab.com/scaled-ci-cd/).\n\nIn his post on [building Heroku CI](https://blog.heroku.com/building-tools-for-developers-heroku-ci), Heroku’s Ike DeLorenzo noted that GitLab CI is “clearly the biggest mover in activity on Stack Overflow,” with more popularity than both Travis CI and CircleCI:\n\n![Image via Heroku blog](https://about.gitlab.com/images/blogimages/heroku-questions-chart.png){: .shadow}\u003Cbr>\n\nWhile the use of Jenkins for CI is still higher than any other solution, we see more and more organizations moving from Jenkins, because upgrading their Jenkins server is a brittle process. The last two big things that GitLab CI lacked were scheduled builds (contributed to [GitLab 9.2](/releases/2017/05/22/gitlab-9-2-released/)) and cross-project builds (released in [GitLab 9.3 on June 22](/releases/2017/06/22/gitlab-9-3-released/)).\n\n## Auto DevOps is next\n\nWe want to [deliver more of idea to production](https://gitlab.com/gitlab-org/gitlab-ce/issues/32639) and continue to make the flow even better. [Our direction](/direction/#ci--cd) is to fully automate DevOps with the concept of [Auto DevOps](https://gitlab.com/gitlab-org/gitlab-ee/issues/2517). In a cloud-native world, developers have many projects, and it doesn't make sense to have to set up their tools for every one of them. 
With help from the wider community we'll ensure that everything works out of the box, from code quality metrics to Review Apps, and from metrics to autoscaling.\n\nWatch our Head of Product Mark Pundsack demonstrate our Auto DevOps vision, including Auto Create, Auto Build, Auto CI, Auto Deploy, Auto Code Quality, and Auto Review Apps:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/KGrJguM361c?rel=0\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nWe couldn't have built GitLab into the tool and company it is today without the contributions of the wider community, and the feedback from our customers. We're excited to see what you build with GitLab.\n\nHave thoughts about Auto DevOps? Comment on this blog post or on [the issue for Auto DevOps](https://gitlab.com/gitlab-org/gitlab-ee/issues/2517). Interested in what your team can do with GitLab Enterprise Edition? [Sign up for a free trial](/free-trial/) and let us know what you think.\n",[1105,721,1062,9],{"slug":5408,"featured":6,"template":700},"whats-next-for-gitlab-ci","content:en-us:blog:whats-next-for-gitlab-ci.yml","Whats Next For Gitlab Ci","en-us/blog/whats-next-for-gitlab-ci.yml","en-us/blog/whats-next-for-gitlab-ci",{"_path":5414,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5415,"content":5421,"config":5426,"_id":5428,"_type":14,"title":5429,"_source":16,"_file":5430,"_stem":5431,"_extension":19},"/en-us/blog/whitesource-for-dependency-scanning",{"title":5416,"description":5417,"ogTitle":5416,"ogDescription":5417,"noIndex":6,"ogImage":5418,"ogUrl":5419,"ogSiteName":685,"ogType":686,"canonicalUrls":5419,"schema":5420},"How to secure your dependencies with GitLab and WhiteSource","We walk you through how to configure WhiteSource in your GitLab instance to enhance your application 
security.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663445/Blog/Hero%20Images/snowymtns.jpg","https://about.gitlab.com/blog/whitesource-for-dependency-scanning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure your dependencies with GitLab and WhiteSource\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-08-10\",\n      }",{"title":5416,"description":5417,"authors":5422,"heroImage":5418,"date":5423,"body":5424,"category":697,"tags":5425},[1775],"2020-08-10","GitLab's WhiteSouce integration empowers developers to enhance application\nsecurity\n\ndirectly within the GitLab UI. The integration provides dependency scanning\nwith\n\nin-depth analysis, along with actionable insights, and auto-remediation.\nWhiteSource for\n\nGitLab enhances your team's productivity, security, and compliance.\n\n\n[Rhys Arkins](https://twitter.com/rarkins?lang=en), Product Director at\nWhiteSource, and I hosted a webinar on \"[Harnessing development to scale\nAppSec](/webcast/scalable-secure-ci/)\"\n\nshowcasing the features of GitLab's WhiteSource integration for open source\ndependency scanning.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n\u003Ciframe src=\"https://www.youtube-nocookie.com/embed/yJpE_ACt9og\"\nframeborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nThis blog post will guide you through setting up WhiteSource in your private\nGitLab instance\n\nand show you how the integration with WhiteSource enhances your\napplication's security within GitLab.\n\n\n## Installing the WhiteSource integration\n\n\nFirst, let's go over how to install the WhiteSource integration. 
In this\nsection, I will review how to\n\nset up GitLab service credentials, generate a WhiteSource configuration,\n\nbuild WhiteSource containers, and how to run them in a Kubernetes cluster.\n\n\n### Requirements for WhiteSource integration\n\n\nBut first, the WhiteSource integration requires that you have the following\nsetup:\n\n\n- [GitLab on-prem instance](/pricing/#self-managed): The GitLab instance\nwhere the WhiteSource integration will run.\n\n- [WhiteSource\naccount](https://www.whitesourcesoftware.com/whitesource-pricing/): Provides\naccess to the WhiteSource integration.\n\n- [Kubernetes cluster](/solutions/kubernetes/): Deploys the WhiteSource\ncontainers.\n\n\n### Create GitLab service credentials\n\n\nThe next step is to create GitLab service credentials. This can be\naccomplished in three simple steps:\n\n\n- In your GitLab instance, go to `Admin Area > System Hooks` and create a\nsystem hook as follows:\n    - **URL:** `https://whitesource.INGRESS_URL.com/payload`\n    - **Secret Token:** Make up a token, you can use `openssl rand -base64 12`\n    - **Trigger:** All except `Tag push events`\n    - **Enable SSL Verification:** `Yes`\n\n  Note: Make sure you save the secret token for use in the next section.\n- Create a user named `@whitesource`, with a developer role. 
An email is not\nrequired.\n\n- As the `@whitesource` user, go to `Settings > Access tokens` and create a\npersonal access token:\n    - **Name:** `WhiteSourceToken`\n    - **Scopes:** `all`\n- Remember to save the access token for use in the next section.\n\n\n### Generate the WhiteSource configuration\n\n\nNext, we generate the WhiteSource configuration, which is used to configure\nthe WhiteSource integration containers.\n\nThis can be done in a few simple steps:\n\n\n- Login to\n[WhiteSource](https://saas.whitesourcesoftware.com/Wss/WSS.html#!login) and\nclick on\n\nthe `Integrate` tab.\n\n\n![whitesource webpage\nview](https://about.gitlab.com/images/whitesource-integration/whitesource_webpage_view.png)\n\nWhiteSource mainpage\n\n{: .note.text-center}\n\n\n- Expand the `WhiteSource for GitLab server` bar and fill the following:\n    - **GitLab Server API URL:** `https://GITLAB_SERVER_URL/api/v4`\n    - **GitLab Webhook URL:** `https://whitesource.INGRESS_URL.com/payload`\n    - **GitLab Webhook secret:** Use the same secret generated in GitLab credentials section\n    - **GitLab personal access token:** `@whitesource` user access token\n\n![whitesource integration\nview](https://about.gitlab.com/images/whitesource-integration/whitesource_integration_setup.png)\n\nWhiteSource integrations page\n\n{: .note.text-center}\n\n\n- Press `Get Activation Key` and copy the generated key\n\n- Open the\n[wss-configurator](https://gitlab.com/fjdiaz/whitesource-helm/-/blob/master/whitesource/wss-configuration/index.html)\nwith your browser\n\n- Select `Export` from the menu, and select the\n[prop.json](https://gitlab.com/fjdiaz/whitesource-helm/-/blob/master/whitesource/wss-configuration/config/prop.json)\n\n- Click on the `General` tab\n\n- Paste the generated key and click `Export` to save a new `prop.json` file\n\n\n### Build the WhiteSource containers\n\n\n- Move the generated prop.json from the previous section 
to\n[wss-gls-app](https://gitlab.com/fjdiaz/whitesource-helm/-/tree/master/whitesource/wss-gls-app/docker/conf),\n[wss-remediate](https://gitlab.com/fjdiaz/whitesource-helm/-/tree/master/whitesource/wss-remediate/docker/src),\nand\n[wss-scanner](https://gitlab.com/fjdiaz/whitesource-helm/-/tree/master/whitesource/wss-scanner/docker/conf).\n\n- Build and push the Docker containers:\n\n\n```bash\n\n$ docker build -t wss-gls-app:19.12.2 whitesource/wss-gls-app/docker\n\n$ docker push wss-gls-app:19.12.2\n\n\n$ docker build -t wss-scanner:19.12.1.2 whitesource/wss-scanner/docker\n\n$ docker push wss-scanner:19.12.1.2\n\n\n$ docker build -t wss-remediate:19.12.2 whitesource/wss-remediate/docker\n\n$ docker push wss-remediate:19.12.2\n\n```\n\n\n### Running the WhiteSource containers\n\n\nGitLab provides native Kubernetes cluster integration. This means that\nGitLab allows you\n\nto deploy software from [GitLab CI/CD](/topics/ci-cd/) pipelines directly to\nyour Kubernetes cluster.\n\n\nWhiteSource containers can be deployed and managed within the same\nKubernetes cluster\n\nused to deploy your application, all by running a simple Helm commands.\n\n\n- Download the WhiteSource [Helm\nchart](https://gitlab.com/fjdiaz/whitesource-helm)\n\n- Edit\n[values.yaml](https://gitlab.com/fjdiaz/whitesource-helm/-/blob/master/helm/whitesource/values.yaml)\n\n- In vaules.yaml set `whitesource.ingress` to\n**https://whitesource.INGRESS_URL.com**\n\n\nYou can get the INGRESS_URL from your Kubernetes cluster settings\n\n\n![ingress url\nlocation](https://about.gitlab.com/images/whitesource-integration/base_domain.png)\n\nIngress URL location\n\n{: .note.text-center}\n\n\n- Make sure Ingress is installed.\n\n\n![ingress\ninstallation](https://about.gitlab.com/images/whitesource-integration/ingress_installation.png)\n\nInstalling Ingress\n\n{: .note.text-center}\n\n\n- Install [Helm](https://helm.sh/docs/intro/install/)\n\n- Deploy WhiteSource with Helm template:\n\n\n```bash\n\nhelm 
upgrade -f helm/whitesource/values.yaml --install whitesource-gitlab\n./helm/whitesource\n\n```\n\n\n## Using WhiteSource\n\n\nOnce the WhiteSource plugin has been installed we can add the `@whitesource`\nuser to the repositories\n\nwe wish to scan. A merge request (MR) with the `.whitesource` file will be\ngenerated automatically.\n\n\nWhiteSource will now scan your repository and generate issues for all the\nvulnerabilities discovered on the main (master)\n\nbranch. These issues will provide detailed information on the vulnerability\nas well as how to resolve it. Some issues\n\ncan even be auto-remediated.\n\n\n![whitesource issue\nview](https://about.gitlab.com/images/whitesource-integration/whitesource_issues.png)\n\nWhiteSource vulnerability issues\n\n{: .note.text-center}\n\n\nEach time a new MR is pushed, a WhiteSource scan will run, and provide a\ndetailed output.\n\n\n![whitesource merge request\nview](https://about.gitlab.com/images/whitesource-integration/whitesource_merge_requests.png)\n\nWhiteSource MR scanning\n\n{: .note.text-center}\n\n\nEach link provided by WhiteSource shows detailed information on the\nvulnerabilities the scan detected:\n\n\n![whitesource web\nlinks](https://about.gitlab.com/images/whitesource-integration/whitesource_advanced_issues.png)\n\nWhiteSource vulnerability information\n\n{: .note.text-center}\n\n\nWhiteSource can be integrated into the [GitLab Security\nDashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/)\nso that your security team can manage the\n\nstatus of these vulnerabilites. 
Access to the Security Dashboard requires a\n[GitLab Ultimate account](/pricing/ultimate/).\n\n\nFor integrating WhiteSource to the Security Dashboard, add the following to\nthe CI.yaml:\n\n\n```\n\nwhitesource-security-publisher:\n  image: openjdk:8-jdk\n  when: manual\n  script:\n    - curl \"{{WEBHOOK_URL}}/securityReport?repoId=$CI_PROJECT_ID&repoName=$CI_PROJECT_NAME&ownerName=$CI_PROJECT_NAMESPACE&branchName=$CI_COMMIT_REF_NAME&defaultBranchName=$CI_DEFAULT_BRANCH&commitId=$CI_COMMIT_SHA\" -o gl-dependency-scanning-report-ws.json\n  artifacts:\n    paths:\n      - gl-dependency-scanning-report-ws.json\n    reports:\n      dependency_scanning:\n        - gl-dependency-scanning-report-ws.json\n    expire_in: 30 days\n```\n\n\nFor more details on the integration checkout [WhiteSource for\nGitLab](https://whitesource.atlassian.net/wiki/spaces/WD/pages/806191420/WhiteSource+for+GitLab).\n\nLearn more at [DevSecOps](/solutions/security-compliance/) and checkout the [Secure\ndirection page](/direction/secure/) for more\n\ninformation on the upcoming features and integrations.\n\n\n## Learn more about application security at GitLab\n\n\n- [How application security engineers can use GitLab to secure their\nprojects](/blog/secure-stage-for-appsec/)\n\n- [Get better container security with GitLab: 4 real-world\nexamples](/blog/container-security-in-gitlab/)\n\n- [How to capitalize on GitLab Security tools with external\nCI](https://docs.gitlab.com/ee/integration/jenkins.html)\n\n\nCover image by [Alexandra\nAvelar](https://unsplash.com/@alexandramunozavelar) on\n[Unsplash](https://unsplash.com/s/photos/snow-capped-mountains)\n\n{: .note}\n",[9,697,232,999,1228],{"slug":5427,"featured":6,"template":700},"whitesource-for-dependency-scanning","content:en-us:blog:whitesource-for-dependency-scanning.yml","Whitesource For Dependency 
Scanning","en-us/blog/whitesource-for-dependency-scanning.yml","en-us/blog/whitesource-for-dependency-scanning",{"_path":5433,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5434,"content":5439,"config":5444,"_id":5446,"_type":14,"title":5447,"_source":16,"_file":5448,"_stem":5449,"_extension":19},"/en-us/blog/why-gitlab-ci-cd",{"title":5435,"description":5436,"ogTitle":5435,"ogDescription":5436,"noIndex":6,"ogImage":710,"ogUrl":5437,"ogSiteName":685,"ogType":686,"canonicalUrls":5437,"schema":5438},"Why GitLab CI/CD?","With GitLab’s out-of-the-box CI/CD, you can spend less time maintaining and more time creating.","https://about.gitlab.com/blog/why-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab CI/CD?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-02\",\n      }",{"title":5435,"description":5436,"authors":5440,"heroImage":710,"date":5441,"body":5442,"category":300,"tags":5443},[715],"2019-04-02","\nDevOps speed is a competitive advantage for businesses. According to DORA, [companies that deploy more frequently perform better in the market](https://cloudplatformonline.com/2018-state-of-devops.html). 
Everyone wants to be able to do their jobs better and deploy more frequently, but as the organization grows, speed bumps keep getting in the way:\n\n*   **Too many integration points** – Connecting [CI/CD](/topics/ci-cd/) to all of the different tools in a [DevOps toolchain](/topics/devops/) is confusing and keeps adding more steps and more points of failure to the process.\n*   **Brittle tools** – We're spending more time maintaining and updating these tools than actually creating new features.\n*   **Slow modernization** – We want to leverage [microservices](/topics/microservices/) and [cloud native](/topics/cloud-native/) development, but we spend too much time putting out fires.\n\nWith these speed bumps come complicated workflows, lack of pipeline visibility, and confusion about processes. With the Total Cost of Ownership (TCO) going up as more resources go to maintenance, teams can't afford to innovate. As organizations scale, these complexities only get worse.\n\nThat sounds exhausting, doesn't it?\n\n## Current CI/CD tools\n\nAt GitLab, we love transparency so much we made it [one of our core values](https://handbook.gitlab.com/handbook/values/#transparency). It's also why [we list all other DevOps tools on our website](/competition/) (no, really). We think open and direct communication is the fastest way to get the feedback you need to make the right decisions. For DevOps teams, the right tools should make things easier but we've found that _more_ doesn't always mean _better_.\n\n### High maintenance\n\nIntegrating CI/CD tools with the rest of your toolchain can get complicated – managing and updating these tools regularly isn't any easier. Many teams rely on tool experts just to keep everything running smoothly.\n\n### Lack of cloud native compatibility\n\nAs more organizations look to leverage microservices and [cloud native](/topics/cloud-native/) development, they'll need CI/CD tools that support modern architecture. 
With some CI/CD platforms, teams still need additional plugins to connect to Kubernetes or a container registry. Teams using legacy CI/CD tools will need to upgrade in order to gain those cloud native capabilities.\n\n### Toolchain complexity\n\nToolchains sometimes have too much in common with [Rube Goldberg devices](https://www.youtube.com/watch?v=qybUFnY7Y8w). Adding on more applications, more platforms, and more handoffs increases complexity that slows down teams. Add to that the maintenance, plugin, and upgrade requirements to manage these separate tools, and productivity gets harder.\n\n## Why teams love GitLab CI/CD\n\nCI/CD tools should make engineers' lives easier by giving them greater visibility into their pipelines, without burdening them with complicated integrations and plugin maintenance. GitLab CI/CD is designed to be simple so teams can start using it right away.\n\n### Easy to use\n\nGitLab uses a YAML configuration that any developer can understand so you can build pipelines faster.\n\n### Cloud native CI/CD\n\nWith its built-in container registry and Kubernetes integration, GitLab supports cloud native development.\n\n### Simple architecture\n\nOne integrated application with one set of permissions.\n\n### Fast and efficient\n\nWith autoscaling runners, developers no longer have to wait on builds, and VMs spin up or down automatically to process queues at a lower cost.\n\n### Everything in one place\n\nGitLab CI/CD is already built into the same application that contains source code management, planning, monitoring, etc.\n\nAs a single application for the entire DevOps lifecycle, everything is in one conversation and visible across teams. With GitLab's out-of-the-box CI/CD, you can spend less time maintaining and more time creating. 
It's CI/CD that _just works_.\n\nWe invite you to explore GitLab CI/CD for yourself, and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple.text-center}\n",[9,721,875],{"slug":5445,"featured":6,"template":700},"why-gitlab-ci-cd","content:en-us:blog:why-gitlab-ci-cd.yml","Why Gitlab Ci Cd","en-us/blog/why-gitlab-ci-cd.yml","en-us/blog/why-gitlab-ci-cd",{"_path":5451,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5452,"content":5458,"config":5465,"_id":5467,"_type":14,"title":5468,"_source":16,"_file":5469,"_stem":5470,"_extension":19},"/en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies",{"title":5453,"description":5454,"ogTitle":5453,"ogDescription":5454,"noIndex":6,"ogImage":5455,"ogUrl":5456,"ogSiteName":685,"ogType":686,"canonicalUrls":5456,"schema":5457},"GitLab moves from compliance pipelines to security policies","Learn about our decision to deprecate compliance pipelines and how to migrate to pipeline execution policies. 
The process is detailed in this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098128/Blog/Hero%20Images/Blog/Hero%20Images/security-checklist_security-checklist.png_1750098128272.png","https://about.gitlab.com/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab is deprecating compliance pipelines in favor of security policies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ian Khor\"}],\n        \"datePublished\": \"2024-10-01\",\n      }",{"title":5459,"description":5454,"authors":5460,"heroImage":5455,"date":5462,"body":5463,"category":697,"tags":5464},"Why GitLab is deprecating compliance pipelines in favor of security policies",[5461],"Ian Khor","2024-10-01","GitLab compliance pipelines ensure security- and compliance-related jobs in applicable projects are run in accordance with compliance frameworks. Similarly, scan execution policies assure GitLab security scans are run in pipelines in a compliant manner.\n\nWhat we’ve learned from users is that they’d like to capture benefits offered by each feature through a single, simpler solution. Users would like to combine the flexibility of [compliance pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration) with the simplicity and versatility of [security policies](https://docs.gitlab.com/ee/user/application_security/policies/).\n\nTo meet this request, we developed a new feature, [pipeline execution policies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html), to help users enforce customized CI/CD jobs for all applicable projects. 
Pipeline execution policies perform a similar function to compliance pipelines, but with increased focus on compliance enforcement, flexibility, and a foundation to build and solve for more use cases in the future.\n\nTo reduce confusion, compliance pipelines have been [deprecated](https://docs.gitlab.com/ee/update/deprecations.html#compliance-pipelines) in 17.3 now that pipeline execution policies are available and, as part of the deprecation, we are providing a step-by-step workflow for migrating from compliance pipelines to pipeline execution policy type in 17.5.\n\nYou can follow along with the work we’re doing with the deprecation through this [epic](https://gitlab.com/groups/gitlab-org/-/epics/11275).\n\n![compliance pipelines - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098139/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098139599.png)\n\n## Why are we deprecating compliance pipelines?\n\nTo understand the reason behind this change, we first need to understand the difference between the [compliance management](https://about.gitlab.com/direction/govern/compliance/compliance-management/) and [policy management](https://about.gitlab.com/direction/govern/security_policies/security_policy_management/) features in GitLab. With compliance management, we are focused on helping you understand your compliance posture, providing tools to report to auditors, and surfacing compliance risks in a way that helps you take action.\n\nWe are also focused on increasing compliance visibility of framework requirements, violations, and audit events throughout the entire DevSecOps lifecycle. Our compliance management offering also establishes a direct association between controls and automations configured through policies back into compliance requirements established through compliance frameworks.\n\nPolicy management works hand in hand with compliance programs, as well as supporting scalable security initiatives. 
Policies give organizations a central location to globally enforce security controls, compliance controls, and automate security and compliance workflows. Security policies will continue to address core use cases across the lifecycle, such as defining enforcement around CI/CD component usage, blocking risks related to dependency and package management, and automating vulnerability management workflows to address security and compliance controls.\n\nTherefore, to ensure we provide the greatest value for our security and compliance users, we are deprecating compliance pipelines and providing a migration path for users to security policies. Not only does this make it clear and simple to the user how and when to enforce jobs as part of a project pipeline, but it also makes the distinction between compliance management and policy management in GitLab clearer. Compliance management is focused on compliance visibility, and policy management is focused on compliance and security enforcement across your entire GitLab instance.\n\n## What is the timeline for the deprecation and removal of compliance pipelines?\n\nThe iteration plan below can be found in the [issue that details the work we are doing](https://gitlab.com/groups/gitlab-org/-/epics/11275) to deprecate and remove compliance pipelines:\n\n**[Deprecation announcement](https://docs.gitlab.com/ee/update/deprecations.html#compliance-pipelines)**\n* Compliance pipeline deprecation and removal was announced in 17.3\n\n**[Compliance pipelines maintenance mode](https://gitlab.com/groups/gitlab-org/-/epics/12324)**\n* Adding banners and migration workflow, and docs\n* Released in 17.5\n\n**[Deter new compliance pipelines](https://gitlab.com/groups/gitlab-org/-/epics/14150)**\n* Adding warning banners for new pipelines\n* Encourage users to try the pipeline execution policy instead\n* Scheduled to start work on this 17.6\n* Scheduled to be released 17.8\n\n**[Compliance pipelines 
removal](https://gitlab.com/groups/gitlab-org/-/epics/12325) (Remove compliance pipelines)**\n* Provide tools to trial the removal and validate any errors\n* Scheduled to start work on this 17.8\n* Scheduled to be released 19.0\n\nAs you can see, we will start with the deprecation of compliance pipelines and the introduction of pipeline execution policy in the 17.3 release. \n\nLeading up to the removal of compliance pipelines in the 19.0 release, we are including new ways to inform and warn users about the upcoming removal. We are providing warning banners on new pipelines, as well as a workflow that can be used to migrate compliance pipelines to pipeline execution policy.\n\nWe ‘ll remove compliance pipelines in the 19.0 release, but provide a reverse feature flag in the milestones leading up that will help users test the removal and understand any impact prior to the removal date.\n\n## How to migrate your compliance pipelines to pipeline execution policy?\n\nThere are two ways users can access the workflow for migrating compliance pipelines to pipeline execution policy.\n\n1. When creating a new compliance framework, there will now be a warning banner that allows users to start using pipeline execution policy type instead of compliance frameworks:\n\n![compliance pipelines - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098139599.png)\n\n2. 
When editing an existing compliance framework, there will now be a warning banner that enables users to migrate their compliance pipelines to pipeline execution policy type – if they have a compliance pipeline configured.\n\n![compliance pipelines - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098139601.png)\n\nSelecting either \"Create policy\" or \"Migrate pipeline to a policy\" in either workflow will bring users to the \"New policy\" creation page in the \"Security Policies\" section. This will allow users to create a new security policy instead of a compliance pipeline. Or, if you migrate an existing compliance pipeline, the new policy will be populated with the compliance pipeline YAML as the remote source for the policy. Also, the policy scope will be populated with the framework from which you are migrating. \n\nThe policy will target all projects with that label for enforcement and apply enforcement of jobs defined in your remote file, now the pipeline execution YAML. By default, the new policy will be configured with the “override” mode, which will override downstream projects' `.gitlab-ci.yml` with the configuration you have defined (similar to compliance pipelines).\n\n![compliance pipelines - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098139604.png)\n\nAlternatively, you may use the “Inject” mode, which introduces a new set of reserved stages to run security and compliance jobs in isolation in a tamper-proof manner, without disrupting the project pipeline, and without coordinating with project teams to define stage names in their pipeline config. \n\nWith this approach, be sure to remove the `include:project`, which is no longer needed for this mode. And, depending on your version, ensure job names are unique (required in GitLab 17.2 and 17.3). 
In GitLab 17.4, we introduced additional enhancements for [managing conflicts](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html#job-naming-best-practice) for additional flexibility.\n\n## Start your migration today\n\nWe want to ensure that all GitLab users who are using compliance pipelines are fully aware of the deprecation of compliance pipelines in 17.3 and its eventual removal by the 19.0 release as a breaking change. \n\nWe are asking users to start migrating their compliance pipelines to the pipeline execution policy type as soon as possible, before the removal of compliance pipelines in GitLab 19.0.\n\nIf there are any questions, please contact your customer service representative or GitLab support for any help.\n\n> Follow along with the compliance pipeline deprecation progress in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11275).\n\n> Share feedback in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/491924) regarding any gaps are blockers for adopting pipeline execution policies.",[697,917,696,9],{"slug":5466,"featured":91,"template":700},"why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies","content:en-us:blog:why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies.yml","Why Gitlab Is Deprecating Compliance Pipelines In Favor Of Security 
Policies","en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies.yml","en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies",{"_path":5472,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5473,"content":5479,"config":5484,"_id":5486,"_type":14,"title":5487,"_source":16,"_file":5488,"_stem":5489,"_extension":19},"/en-us/blog/why-gitops-should-be-workflow-of-choice",{"title":5474,"description":5475,"ogTitle":5474,"ogDescription":5475,"noIndex":6,"ogImage":5476,"ogUrl":5477,"ogSiteName":685,"ogType":686,"canonicalUrls":5477,"schema":5478},"Why GitOps should be the workflow of choice","What is GitOps and how do you apply it in real-world applications?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681239/Blog/Hero%20Images/shiro-hatori-WR-ifjFy4CI-unsplash.jpg","https://about.gitlab.com/blog/why-gitops-should-be-workflow-of-choice","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitOps should be the workflow of choice\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-04-17\",\n      }",{"title":5474,"description":5475,"authors":5480,"heroImage":5476,"date":5481,"body":5482,"category":718,"tags":5483},[1384],"2020-04-17","\n\n## How did we get here?\n\nIn 2006, with the launch of AWS Elastic Compute, Amazon set off a revolution in the way we, as developers, consume and use compute and other resources required to deploy and maintain the applications we write. 
Not long after, infrastructure-as-code started to explode onto the scene with projects like Puppet, Ansible, and Terraform.\n\nAs these technologies matured, it became apparent that scaling applications in a modern or cloud environment required reproducible, reusable components, and infrastructure-as-code became the gold standard for ensuring the proper allocation of resources to an application. At the same time, the infrastructure space and world of software continued to evolve. The concept of [continuous delivery](/topics/ci-cd/) and release of software came into vogue and was popularized by large technology companies. The \"book\" on continuous delivery came in 2011, where it became apparent that to move fast enough to keep up with market demands, a radically [faster DevOps](/topics/devops/) cycle was required.\n\nAs continuous delivery for software becomes more commonplace, new solutions in the infrastructure space have been created to keep up. Kubernetes and the rise of [\"serverless\"](/topics/serverless/) promised to once again free developers from the need to worry about infrastructure. In a post-DevOps world - how does one think about infrastructure-as-code and applications as one cohesive unit?  Enter GitOps.\n\n## What is GitOps?\n\n[GitOps](/topics/gitops/) is conceptually not that different from either infrastructure-as-code or continuous delivery. In fact, in many ways, it is the convergence of those two concepts. Developers and operations teams alike can share a common repository of code, and GitOps allows a developer-like experience for managing applications and their underlying infrastructure. In that way, you can use GitOps as an operating model for modern infrastructures like Kubernetes, serverless, and other cloud native technologies.\n\nVersion control and [continuous integration](/solutions/continuous-integration/) are essential tools for deploying software continuously and reliably. 
GitOps brings both of those software best practices to operations by making the repository the central-source-of-truth for all of the infrastructure required to run applications. With GitOps, any change to infrastructure is committed to the git repository along with any application changes.\n\nThis allows developers and operators to use familiar development patterns and branching strategies. From there, a merge request provides the [central place to collaborate](/topics/gitops/gitops-gitlab-collaboration/) and suggest changes. Once merged into the mainline, CI/CD should be configured to deploy both the application and infrastructure changes automatically. The way this enables synchronization between developers and operators is what can be very appealing about GitOps as the next iteration of DevOps.\n\n## Why GitOps?\n\nWhy are so many organizations large and small considering a move to a more GitOps-focused culture?\n\nAs software has eaten the world, business operational excellence has become directly aligned with the ability to deliver quality software faster. Business survival depends on adaptive and efficient software development practices. Those practices require new processes and changes in the way we think about change management.\n\nIn many software practices, the concept of code review and approval is where most of the checks and balances for deploying production code comes into play. At GitLab, we believe that the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) is the best place to collaborate on code and approve changes.  Processes and tools that are external to the code change only serve to increase cycle time and inhibit an organization’s ability to deploy code quickly.\n\nOnce an organization has embraced continuous integration and code review as the place for change request approval, it is a natural progression to discuss the idea of continuous delivery to production after those CI gates and human approvals are passed. 
As GitOps takes that concept a step further and integrates the pipeline to production directly in the git and merge request workflow, it’s become a hot topic and one that will become the normal workflow for efficient software organizations. Taking unnecessary steps and tools out of the critical path to production enables an organization to deliver better products faster, without sacrificing the governance required to deploy code.\n\n\n\nCover image by [Shiro Hatori](https://unsplash.com/@shiroscope) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[9,830,232],{"slug":5485,"featured":6,"template":700},"why-gitops-should-be-workflow-of-choice","content:en-us:blog:why-gitops-should-be-workflow-of-choice.yml","Why Gitops Should Be Workflow Of Choice","en-us/blog/why-gitops-should-be-workflow-of-choice.yml","en-us/blog/why-gitops-should-be-workflow-of-choice",{"_path":5491,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5492,"content":5495,"config":5503,"_id":5505,"_type":14,"title":5506,"_source":16,"_file":5507,"_stem":5508,"_extension":19},"/en-us/blog/why-now-is-the-time-for-embedded-devsecops",{"noIndex":6,"title":5493,"description":5494},"Why now is the time for embedded DevSecOps","Learn how embedded development teams address long feedback cycles, manual compliance, and isolated development with DevSecOps.",{"title":5493,"description":5494,"authors":5496,"heroImage":5027,"date":5498,"body":5499,"category":741,"tags":5500},[5497],"Matt DeLaney","2025-07-01","For embedded systems teams, DevSecOps has traditionally seemed like an approach better suited to SaaS applications than firmware development. But this is changing. Software is now a primary differentiator in hardware products. New market expectations demand modern development practices. In response, organizations are pursuing \"embedded DevSecOps.\"\n\nWhat is embedded DevSecOps? 
The application of collaborative engineering practices, integrated toolchains, and automation for building, testing, and securing software to embedded systems development. Embedded DevSecOps includes necessary adaptations for hardware integration.\n## Convergence of market forces\nThree powerful market forces are converging to compel embedded teams to modernize their development practices.\n### 1. The software-defined product revolution\nProducts once defined primarily by their hardware are now differentiated by their software capabilities. The software-defined vehicle (SDV) market tells a compelling story in this regard. It's projected to grow from $213.5 billion in 2024 to [$1.24 trillion](https://www.marketsandmarkets.com/Market-Reports/software-defined-vehicles-market-187205966.html) by 2030, a massive 34% compound annual growth rate.\nThe software content in these products is growing considerably. By the end of 2025, the average vehicle is expected to contain [650 million lines of code](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/). Traditional embedded development approaches cannot handle this level of software complexity. \n### 2. Hardware virtualization as a technical enabler\nHardware virtualization is a key technical enabler of embedded DevSecOps. Virtual electronic control units (vECUs), cloud-based ARM CPUs, and sophisticated simulation environments are becoming more prevalent. Virtual hardware allows testing that once required physical hardware.\n\nThese virtualization technologies provide a foundation for continuous integration ([CI](https://about.gitlab.com/topics/ci-cd/)). But their value is fully realized only when integrated into an automated workflow. Combined with collaborative development practices and automated pipelines, virtual testing helps teams detect issues much earlier, when fixes are far less expensive. 
Without embedded DevSecOps practices and tooling to orchestrate these virtual resources, organizations can't capitalize on the virtualization trend.\n### 3. The competitive and economic reality\nThree interrelated forces are reshaping the competitive landscape for embedded development:\n- The talent war has shifted decisively. As an embedded systems leader at a GitLab customer explained, “No embedded engineers graduating from college today know legacy tools like Perforce. They know Git. These young engineers will work at a company for six months on legacy tools, then quit.” Companies using outdated tools may lose their engineering future.\n- This talent advantage translates into competitive superiority. Tech-forward companies that attract top engineers with modern practices achieve remarkable results. For example, in 2024, [SpaceX](https://spacenews.com/spacex-launch-surge-helps-set-new-global-launch-record-in-2024/) performed more orbital launches than the rest of the world combined. Tech-forward companies excel at software development and embrace a modern development culture. This, among other things, creates efficiencies that legacy companies struggle to match. \n- The rising costs of embedded development — driven by long feedback cycles — create an urgent need for embedded DevSecOps. When developers have to wait weeks to test code on hardware test benches, productivity remains inherently low. Engineers lose context and must switch contexts when results arrive. The problem worsens when defects enter the picture. Bugs become more expensive to fix the later they're discovered. Long feedback cycles magnify this problem in embedded systems.\n\nOrganizations are adopting embedded DevSecOps to help combat these challenges.\n## Priority transformation areas\nBased on these market forces, forward-thinking embedded systems leaders are implementing embedded DevSecOps in the following ways. 
\n### From hardware bottlenecks to continuous testing\nHardware-testing bottlenecks represent one of the most significant constraints in traditional embedded development. These delays create the unfavorable economics described earlier — when developers wait weeks for hardware access, defect costs spiral.\nAddressing this challenge requires a multifaceted approach including: \n* Automating the orchestration of expensive shared hardware test benches among embedded developers  \n* Integrating both SIL (Software-in-the-Loop) and HIL (Hardware-in-the-Loop) testing into automated CI pipelines  \n* Standardizing builds with version-controlled environments\n\nEmbedded developers can accomplish this with GitLab's [On-Premises Device Cloud](https://gitlab.com/gitlab-accelerates-embedded/comp/device-cloud), a CI/CD component. Through automating the orchestration of firmware tests on virtual and real hardware, teams are better positioned to reduce feedback cycles from weeks to hours. They also can catch more bugs early on in the software development lifecycle.\n### Automating compliance and security governance\nEmbedded systems face strict regulatory requirements. Manual compliance processes are unsustainable.\nLeading organizations are transforming how they comply with these requirements by: \n* Replacing manual workflows with automated [compliance frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/)  \n* Integrating specialized functional safety, security, and code quality tools into automated continuous integration pipelines  \n* Automating approval workflows, enforcing code reviews, and maintaining audit trails  \n* Configuring compliance frameworks for specific standards like ISO 26262 or DO-178C\n\nThis approach enables greater compliance maturity without additional headcount — turning what was once a burden into a competitive advantage. 
One leading electric vehicle (EV) manufacturer executes 120,000 CI/CD jobs per day with GitLab, many of which include compliance checks. And they can fix and deploy bug fixes to vehicles within an hour of discovery. This level of scale and speed would be extremely difficult without automated compliance workflows.\n### Enabling collaborative innovation\nHistorically, for valid business and technical reasons, embedded developers have largely worked alone at their desks. Collaboration has been limited. Innovative organizations break down these barriers by enabling shared code visibility through integrated source control and CI/CD workflows. These modern practices attract and retain engineers while unlocking innovation that would remain hidden in isolated workflows.\nAs one director of DevOps at a tech-forward automotive manufacturer (a GitLab customer) explains: \"It's really critical for us to have a single pane of glass that we can look at and see the statuses. The developers, when they bring a merge request, are aware of the status of a given workflow in order to move as fast as possible.\" This transparency accelerates innovation, enabling automakers to rapidly iterate on software features that differentiate their vehicles in an increasingly competitive market.\n## The window of opportunity\nEmbedded systems leaders have a clear window of opportunity to gain a competitive advantage through DevSecOps adoption. But the window won't stay open forever. Software continues to become the primary differentiator in embedded products, and the gap between leaders and laggards will only widen.\nOrganizations that successfully adopt DevSecOps will reduce costs, accelerate time-to-market, and unlock innovation that differentiates them in the market. 
The embedded systems leaders of tomorrow are the ones embracing DevSecOps today.\n> While this article explored why now is the critical time for embedded teams to adopt DevSecOps, you may be wondering about the practical steps to get started. Learn how to put these concepts into action with our guide: [4 ways to accelerate embedded development with GitLab](https://about.gitlab.com/blog/4-ways-to-accelerate-embedded-development-with-gitlab/).",[5501,693,9,5502],"embedded DevOps","automotive",{"featured":6,"template":700,"slug":5504},"why-now-is-the-time-for-embedded-devsecops","content:en-us:blog:why-now-is-the-time-for-embedded-devsecops.yml","Why Now Is The Time For Embedded Devsecops","en-us/blog/why-now-is-the-time-for-embedded-devsecops.yml","en-us/blog/why-now-is-the-time-for-embedded-devsecops",{"_path":5510,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5511,"content":5517,"config":5521,"_id":5523,"_type":14,"title":5524,"_source":16,"_file":5525,"_stem":5526,"_extension":19},"/en-us/blog/windows-shared-runner-beta",{"title":5512,"description":5513,"ogTitle":5512,"ogDescription":5513,"noIndex":6,"ogImage":5514,"ogUrl":5515,"ogSiteName":685,"ogType":686,"canonicalUrls":5515,"schema":5516},"Windows Shared Runners beta now available on GitLab.com","Scalable Windows VM's for running Windows build jobs on GitLab.com.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681027/Blog/Hero%20Images/windows-shared-beta.jpg","https://about.gitlab.com/blog/windows-shared-runner-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Windows Shared Runners beta now available on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":5512,"description":5513,"authors":5518,"heroImage":5514,"date":3773,"body":5519,"category":718,"tags":5520},[1465],"\n\n\nGitLab has had support 
for Windows CI/CD Runners for quite a long time, but if you were doing Windows development, you needed to [install and manage these Runners](https://docs.gitlab.com/runner/install/windows.html) yourself. This works great for customers who prefer to manage their own Runners, but for customers who prefer to use GitLab.com shared Runners managed by the GitLab team, the choice has been limited to Linux.\n\nToday, we are happy to announce that Windows Shared Runners hosted by GitLab is available in beta. As we are starting to roll out this important service to our community, we invite you to help shape the direction of CI/CD tooling for the Windows ecosystem on GitLab.com.\n\n## What's new?\n\nNow, you can take advantage of a fully-managed, auto-scaling, and secure environment for running your build jobs on Windows virtual machines (VMs). These GitLab-hosted Windows Shared Runners are pre-configured with various software packages such as the Chocolately package manager for Windows,  Visual Studio 2019 Build Tools, Microsoft .Net Framework, to name a few. So you have a base set of tooling to start building your Windows applications without needing to set up and install your own self-hosted Windows Runners. You can find a full list of available Windows packages in the package [documentation](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/blob/main/cookbooks/preinstalled-software/README.md).\n\nWith the Windows Shared Runners on GitLab.com, each job runs in a new virtual machine instance that gets deleted after the job is complete, ensuring that your code is 100% isolated and secure. We also take care of maintenance and upgrades to the pre-configured software packages, so you don't have to. Just like with GitLab.com Linux Runners, there’s no requirement to use Shared Runners. 
If your build tooling configuration or security requirements demand it, you can, as always, [install and self-host Windows Runners](https://docs.gitlab.com/runner/install/windows.html) on your infrastructure.\n\n## Technology Overview\n\nThe following details a few key specifications for the Windows Shared Runners:\n\n- The Windows Shared Runners use the GitLab [custom executor](https://docs.gitlab.com/runner/executors/custom.html) that we introduced in 12.1.\n- A new Windows Shared Runners virtual machine is created for each pipeline job and deleted after the job is completed.\n\n## Pricing\n\nTo begin with, Windows Shared Runner pricing will be the same as Linux Runners. Usage for Windows Runners will be deducted from your Runner minute pool [depending on your plan](/pricing/#gitlab-com). You can optionally [purchase additional runner minutes](https://docs.gitlab.com/ee/subscriptions/gitlab_com/#purchase-additional-ci-minutes) that will be used for both Linux and Windows shared runners.\n\nIn the future, Windows Shared Runners will likely use separate pricing that is higher than Linux Minutes. 
Any future [pricing](https://gitlab.com/gitlab-org/gitlab/issues/30834) changes will be announced on the GitLab blog.\n\n\n## Getting started\n\nTo get started, create a `.gitlab-ci.yml` file in your GitLab hosted project's root directory and add the following tags: `shared-windows`, `windows`, and `windows-1809`  as shown in the example configuration file.\n\n```\n.shared_windows_runners:\n  tags:\n  - shared-windows\n  - windows\n  - windows-1809\n\nstages:\n  - build\n  - test\n\nbefore_script:\n - Set-Variable -Name \"time\" -Value (date -Format \"%H:%m\")\n - echo ${time}\n - echo \"started by ${GITLAB_USER_NAME}\"\n\nbuild:\n  extends:\n  - .shared_windows_runners\n  stage: build\n  script:\n  - echo \"running scripts in the build job\"\n\ntest:\n  extends:\n  - .shared_windows_runners\n  stage: test\n  script:\n  - echo \"running scripts in the test job\"\n```\n\nIncluding the `.gitlab-ci.yml` file in the project repository means that any new commits will trigger the execution of your [GitLab CI/CD pipeline](/topics/ci-cd/).  In this file, you have the option of specifying tags so that a job will only run on GitLab Runners that match the tag specified. For more information on the use of tags, refer to the [tags](https://docs.gitlab.com/ee/ci/yaml/#tags.) section of the GitLab CI/CD Pipeline Configuration Reference documentation. The [Shared Runners](https://docs.gitlab.com/ee/user/gitlab_com/#shared-runners) section of the GitLab.com settings documentation page covers more configuration information for the Windows Shared Runners.\n\n\n## Notable limitations and known issues\n\nThe hosting of Windows Shared Runners is a new service on GitLab.com. This section covers any limitations or known issues that users of the beta should take into consideration when using this service.\n\n- The average provisioning time for a new Windows VM is at five minutes. 
This means that for the beta, you will notice slower build start times on the Windows Shared Runners fleet compared to Linux. In a future release, we will add capabilities to the autoscaler to enable the pre-warming of the virtual machine instances. This will significantly reduce the time it takes to provision a VM on the Windows fleet. Additional details and plans are covered in this [issue](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler/issues/32).\n- Pending queue times will be longer than the queue times on the Linux Shared Runner fleet.\n- Since Windows Shared Runners are currently in beta, the performance, uptime, and capabilities will be limited, and so, they are not recommended for production use.\n- The Windows Shared Runners virtual machine instances do not use the GitLab Docker executor. This means that unlike the Linux Shared Runners, you will not be able to specify `image` and `services` in your pipeline configuration.\n- For the beta release, we have included a set of software packages in the base VM image. If your CI job requires additional software that's not included in this [list](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/tree/master/cookbooks/preinstalled-software), then you will need to add installation commands to `before_script` or `script` to install the required software. Note: Each job runs on a new VM instance, so the installation of additional software packages needs to be repeated for each job in your pipeline.\n- There is the possibility that we introduce breaking changes that will require updates to pipelines that are using the Windows Shared Runner fleet.\n\n## Next steps\n\nWe [plan](https://gitlab.com/groups/gitlab-org/-/epics/2162) to continue to iterate quickly and improve the build environment, Runner, and tooling during the beta period. 
We invite you to complete this short [form](https://forms.gle/9qaB2kQcBX93PVax5) because your feedback is critical to helping us prioritize work on the most valuable improvements to the Windows Shared Runners solution.\n\nTo report a bug or request a feature or enhancement, follow these steps:\n- Open an issue in the [GitLab Runner project](https://gitlab.com/gitlab-org/gitlab-runner/issues).\n- Describe the feature enhancement and, if possible, include any links to examples from your repository.\n- Add these labels to the issue: `Shared Runners::Windows`, `group::runner`\n- Tag [@DarrenEastman](https://gitlab.com/DarrenEastman) on the issue.\n\n\n\n\nCover photo by William Daigneault on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[9],{"slug":5522,"featured":6,"template":700},"windows-shared-runner-beta","content:en-us:blog:windows-shared-runner-beta.yml","Windows Shared Runner Beta","en-us/blog/windows-shared-runner-beta.yml","en-us/blog/windows-shared-runner-beta",{"_path":5528,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5529,"content":5535,"config":5541,"_id":5543,"_type":14,"title":5544,"_source":16,"_file":5545,"_stem":5546,"_extension":19},"/en-us/blog/working-with-yaml-gitlab-ci-android",{"title":5530,"description":5531,"ogTitle":5530,"ogDescription":5531,"noIndex":6,"ogImage":5532,"ogUrl":5533,"ogSiteName":685,"ogType":686,"canonicalUrls":5533,"schema":5534},"Working with YAML in GitLab CI from the Android perspective","Guest author Renato Stanic shares a sample YAML configuration for Android projects, which helps his team with faster, more iterative development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665524/Blog/Hero%20Images/yaml-gitlab-ci-android.png","https://about.gitlab.com/blog/working-with-yaml-gitlab-ci-android","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Working with YAML in GitLab CI from the Android perspective\",\n 
       \"author\": [{\"@type\":\"Person\",\"name\":\"Renato Stanic\"}],\n        \"datePublished\": \"2017-11-20\",\n      }",{"title":5530,"description":5531,"authors":5536,"heroImage":5532,"date":5538,"body":5539,"category":718,"tags":5540},[5537],"Renato Stanic","2017-11-20","Using [continuous integration in our everyday\nworkflow](/solutions/continuous-integration/) can help us a lot with faster\nand iterative development, and having CI do checks every time we change our\ncodebase helps us with deal with fear of modifying code.\n\n\n\u003C!-- more -->\n\n\nDeploying app builds manually takes time and leaves us idle while we could\nbe developing new and exciting features instead. Here at Undabot we are\nusing GitLab CI for continuous integration. GitLab CI uses a YAML file for\njob configuration. In this blog post we will go through a sample YAML\nconfiguration for Android projects and describe the main YAML building\nblocks with common Android CI jobs.\n\n\n### YAML intro\n\nThe YAML file defines a set of jobs with constraints stating when they\nshould be run. 
The jobs are defined as top-level elements with a name and\nalways have to contain at least the `script` clause:\n\n\n```\n\nhelloworld_job:\n  script: \"echo Hello World!\"\n\nassemble_job:\n  script: \"./gradlew assembleRelease\"\n```\n\n\nYAML syntax allows for more complex job definitions than in the above\nexample:\n\n\n```\n\nbefore_script:\n  - bundle install\n\nafter_script:\n  - rm secrets\n\nstages:\n  - build\n  - test\n  - deploy\n\nhelloworld_job:\n  stage: build\n  script:\n    - echo Hello World\n  only:\n    - master\n  tags:\n    - android\n```\n\n\n`before_script` – commands that run before each jobs script\n\n`after_script` – commands that run after each jobs script\n\n`stages` – used to define build stages\n\n`only` – defines the names of branches and tags for which the job will run\n\n`tags` – used to select specific Runners from the list of all Runners that\nare allowed to run this project.\n\n\n## Initial setup for Android\n\n\nFirst step is to create a YAML file called `gitlab-ci.yml` in root directory\nof your Android project and add the following code:\n\n\n```\n\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\nstages:\n  - build\n  - test\n  - quality_assurance\n  - deploy\n```\n\n\nIn `before_script` we execute these two commands:\n\n`- export ANDROID_HOME=\"$HOME/Library/Android/sdk”`– sets Android home\nenvironment variable to be available for all other jobs and Gradle tasks\n\n`- bundle install` – we are using fastlane for task automation and Bundler\nto manage Ruby gems so we need to run bundle install to make sure everything\nis installed correctly.\n\n\nIn the `stages` section we define four build stages:\n\n`- build`– for build jobs\n\n`- test`– for test jobs that include unit and instrumentation tests\n\n`- quality_assurance`– for jobs that run all of our QA tools\n\n`- deploy`– for deployment jobs\n\n\n## Build stage\n\n\nThis job (`build_job`) is used to create an APK artifact that can 
be used to\ntest the app manually or to upload it to the Play Store.\n\n\n```\n\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n      - app/build/outputs/\n  ```\n\n`build_job:`– name of the CI job\n\n`stage: build`– it gets executed in the build stage\n\n`./gradlew clean assembleRelease`– executes Gradle command to create a\nrelease APK\n\n`artifacts:`– job section that defines list of files and directories that\nare attached to a job after completion.\n\n`paths:`– output file paths\n\n`app/build/outputs`– directory path of our APK\n\n\n## Unit tests\n\n\nThis job (`unit_tests`) runs our unit tests in a test stage. Every time they\nfail, a report artifact will be created. Each report artifact expires within\nfour days of creation.\n\n\n```\n\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n  ```\n\n`./gradlew test`– run Gradle command that triggers our unit tests\nartifacts:\n\n`name:`– defines artifact name by using environment variables\n\n`CI_PROJECT_NAME`– project name that is currently being built\n\n`CI_BUILD_REF_NAME`– branch or tag name for which project is built\n\n`when:`– defines when is it created (on_success, on_failure, always)\n\n`expire_in:`– defines when is it expired, after artifact has expired it gets\ndeleted from CI\n\n\n## Instrumentation tests\n\n\nThis job (`instrumentation_tests`) runs all of our instrumentation tests in\na test stage by starting a windowless emulator without sound and animations\nfollowed by a [custom bash\nscript](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141)\nthat waits for the emulator to start, after which the device is unlocked by\nsending key event 82. When the emulator is ready we run the Gradle command\nfor instrumentation tests. 
Once all tests finished running, the emulator is\nkilled with a [custom bash\nscript](https://gist.github.com/anonymous/614aafb2d8710865c688684a8657a141).\n\n\n```\n\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n  ```\n\n`- emulator - avd testAVD -no-audio -no-window &`\n\n`- ./ci/android-wait-for-emulator.sh`\n\nStarts the emulator and waits for it to boot.\n\n`- adb devices`\n\nDisplays list of found devices in GitLab web terminal.\n\n`- adb shell settings put global window_animation_scale 0 &`\n\n`- adb shell settings put global transition_animation_scale 0 &`\n\n`- abd shell settings put global animator_duration_scale 0 &`\n\nDisables all animations and transitions.\n\n\n## Static analysis\n\n\nThis job (`static_analysis`) runs all of static code analysis in QA stage.\nThis is a tricky area especially if you are working on a project with a lot\nof legacy code. My suggestion would be to disable all of the rules and start\nfixing them one at the time. 
Tools used for static analysis are lint,\ncheckstyle, pmd and findbugs.\n\n\n```\n\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n```\n\n\n`- ./gradlew lint`\n\n`- ./gradlew checkstyle`\n\n`- ./gradlew pmd`\n\n`- ./gradlew findbugs`\n\nGradle commands that trigger QA tools.\n\n`- app/build/reports` – path to our QA reports\n\n\n## Deploy stage\n\n\nThe final job (`deploy_internal`) deploys the app to the QA team in deploy\nstage. You don’t want to deploy every time you commit something so this step\nis set as manual. Manual jobs are triggered via GitLab web interface by\npressing the play button in your pipeline list. If you are using fastlane as\nyour deployment tool, the last job will look like the following code:\n\n\n```\n\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n\n`- bundle exec fastlane android deploy_lane`– executes fastlane deploy lane\nthat deploys app to the QA team\n\n`when: manual` – defines [when is a job\nexecuted](https://docs.gitlab.com/ee/ci/yaml/#when)\n\n\n## There’s plenty more\n\n\nSetting up Android continuous integration with GitLab CI is great and\nsupports plenty of cool features a lot more than we showed. 
Hopefully this\nshort introduction was helpful and is going to motivate you to discover more\nfeatures on your own.\n\n\nComplete `gitlab-ci.yml`:\n\n\n```\n\nbefore_script:\n  - export ANDROID_HOME=\"$HOME/Library/Android/sdk\"\n  - bundle install\n\nstages:\n\n- build\n\n- test\n\n- quality_assurance\n\n- deploy\n\n\nbuild_job:\n  stage: build\n  script:\n    - ./gradlew clean assembleRelease\n  artifacts:\n    paths:\n    - app/build/outputs/\n\nunit_tests:\n  stage: test\n  script:\n    - ./gradlew test\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/tests/\n\ninstrumentation_tests:\n  stage: test\n  script:\n    - emulator -avd testAVD -no-audio -no-window &\n    - ./ci/android-wait-for-emulator.sh\n    - adb devices\n    - adb shell settings put global window_animation_scale 0 &\n    - adb shell settings put global transition_animation_scale 0 &\n    - adb shell settings put global animator_duration_scale 0 &\n    - adb shell input keyevent 82 &\n    - ./gradlew connectedAndroidTest\n    - ./ci/stop-emulators.sh\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/androidTests/connected/\n\nstatic_analysis:\n  stage: quality_assurance\n  script:\n    - ./gradlew lint\n    - ./gradlew checkstyle\n    - ./gradlew pmd\n    - ./gradlew findbugs\n  artifacts:\n    name: \"reports_${CI_PROJECT_NAME}_${CI_BUILD_REF_NAME}\"\n    when: on_failure\n    expire_in: 4 days\n    paths:\n      - app/build/reports/\n\ndeploy_internal:\n  stage: deploy\n  script:\n    - bundle exec fastlane android deploy_lane\n  when: manual\n```\n\n\n_[Working with YAML in GitLab CI from an Android\nperspective](https://blog.undabot.com/working-with-yaml-in-gitlab-ci-from-android-perspective-b8cf54b5b911)\nwas originally published on Undabot's 
blog._\n",[9,763],{"slug":5542,"featured":6,"template":700},"working-with-yaml-gitlab-ci-android","content:en-us:blog:working-with-yaml-gitlab-ci-android.yml","Working With Yaml Gitlab Ci Android","en-us/blog/working-with-yaml-gitlab-ci-android.yml","en-us/blog/working-with-yaml-gitlab-ci-android",{"_path":5548,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5549,"content":5555,"config":5560,"_id":5562,"_type":14,"title":5563,"_source":16,"_file":5564,"_stem":5565,"_extension":19},"/en-us/blog/wrapping-up-commit",{"title":5550,"description":5551,"ogTitle":5550,"ogDescription":5551,"noIndex":6,"ogImage":5552,"ogUrl":5553,"ogSiteName":685,"ogType":686,"canonicalUrls":5553,"schema":5554},"Wrapping up GitLab Commit","From bagels to bowling with a healthy dose of DevSecOps and CI/CD in between, it was an epic day of learning and sharing at GitLab Commit Brooklyn.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680823/Blog/Hero%20Images/commit-brooklyn-graffiti-cover.jpg","https://about.gitlab.com/blog/wrapping-up-commit","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Wrapping up GitLab Commit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-09-18\",\n      }",{"title":5550,"description":5551,"authors":5556,"heroImage":5552,"date":5557,"body":5558,"category":300,"tags":5559},[1037],"2019-09-18","\n\n***Relive GitLab Commit Brooklyn through the power of lights, cameras, and a pinch of Tanuki magic. 
Here's the [full YouTube playlist for the event](https://www.youtube.com/playlist?list=PLFGfElNsQthaaqEAb6ceZvYnZgzSM50Kg)!***\n\nIf there's anything you need to understand about GitLab's first ever user conference, it's this: I started the day with a New York bagel, learned how to create a CI/CD pipeline in just 20 minutes, found out [NASA will take GitLab into space](/blog/open-source-nasa-gl/), and it ended in a bowling alley... yes, it was _that_ kind of day.\n\nWe did a neighborhood takeover of a few blocks in the Williamsburg area of Brooklyn and before I even arrived at the venue, I knew something interesting was happening. There was wall grafitti and street graffiti.\n\n![street graffiti](https://about.gitlab.com/images/blogimages/commitbrooklynstreet.jpg){: .shadow.small.center}\nGitLab has arrived in Brooklyn!\n{: .note.text-center}\n\nOver 400 attendees gathered in brick-and-light-filled meeting spaces for conversation, demonstrations, laughter, and even a screaming chicken (the result of the CI/CD demo). It was an epic day of sharing, learning and exploring that could have felt overwhelming. Instead, the quirky informal spaces seemed to relax everyone and make it easier to actually listen and learn.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n  \u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">THE live coding keynote is here! 
\u003Ca href=\"https://twitter.com/eddiezane?ref_src=twsrc%5Etfw\">@eddiezane\u003C/a> of \u003Ca href=\"https://twitter.com/digitalocean?ref_src=twsrc%5Etfw\">@digitalocean\u003C/a> introduces his “startup” Screaming Chicken at \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> and shows how he runs it on \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@GitLab\u003C/a> AutoDevops, \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> and DO. The audience is riveted! \u003Ca href=\"https://t.co/ibao6ngeNX\">pic.twitter.com/ibao6ngeNX\u003C/a>\u003C/p>&mdash; Priyanka Sharma @ #GitLabCommit Brooklyn! (@pritianka) \u003Ca href=\"https://twitter.com/pritianka/status/1173972101713276928?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote>\n  \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nThis was not anyone's typical idea of a user conference: no large, impersonal hotel, no pre-fab food, and no stilted conversations with total strangers. No one spent the day in frigid air conditioning. Instead everyone moved seamlessly from space to space, inside and outside, and it really was refreshing.\n\nLunch was refreshing too. 
It's not every day a gorilla brings you grilled cheese and tater tots under sunny skies.\n\n![Gorilla Grilled Cheese](https://about.gitlab.com/images/blogimages/commitbrooklyngorilla.jpg){: .shadow.small.center}\nThis was some grilled cheese!\n{: .note.text-center}\n\nAfter lunch, some people met up with our CEO [Sid Sijbrandij](/company/team/#sytses) while others attended individual tracks.\n\n![Office hours with Sid](https://about.gitlab.com/images/blogimages/commitbrooklynsid.jpg){: .shadow.small.center}\nMeet the CEO!\n{: .note.text-center}\n\nAn open coffee and tea bar (we took over the local coffee shop and my iced chai latte was delicious) fueled lots of conversations about the challenges we all face around DevOps.\n\n![iced chai](https://about.gitlab.com/images/blogimages/commitbrooklynchai.jpg){: .shadow.small.center}\nCheers!\n{: .note.text-center}\n\nAnd then it was time to, well, bowl.\n\n![Bowling](https://about.gitlab.com/images/blogimages/commitbrooklynbowling.jpg){: .shadow.small.center}\nGitLab at Brooklyn Bowl\n{: .note.text-center}\n\nIt might be bragging, but we really do throw a great party (and user conference, for that matter).\n\nIf you'd like to see for yourself, you'll have another chance to network with others on the same DevOps journey. Get your tickets to [Commit London on October 9](/events/commit/#). 
You can also read about news from Commit: [$268 million in Series E funding, new partners, and more](/blog/live-from-commit-news/), and check out the highlight reel below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/hi2D0Se_VnA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[9,3660,278,763,2701],{"slug":5561,"featured":6,"template":700},"wrapping-up-commit","content:en-us:blog:wrapping-up-commit.yml","Wrapping Up Commit","en-us/blog/wrapping-up-commit.yml","en-us/blog/wrapping-up-commit",{"_path":5567,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5568,"content":5574,"config":5581,"_id":5583,"_type":14,"title":5584,"_source":16,"_file":5585,"_stem":5586,"_extension":19},"/en-us/blog/zeit-launches-now-for-gitlab",{"title":5569,"description":5570,"ogTitle":5569,"ogDescription":5570,"noIndex":6,"ogImage":5571,"ogUrl":5572,"ogSiteName":685,"ogType":686,"canonicalUrls":5572,"schema":5573},"ZEIT launches Now for GitLab","This first-class integration can automatically deploy any GitLab project containing a static or dynamic website to ZEIT's global CDN.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670498/Blog/Hero%20Images/gitlab-zeit-cover.png","https://about.gitlab.com/blog/zeit-launches-now-for-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ZEIT launches Now for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarup Banskota\"},{\"@type\":\"Person\",\"name\":\"Arunoda Susiripala\"}],\n        \"datePublished\": \"2019-04-01\",\n      }",{"title":5569,"description":5570,"authors":5575,"heroImage":5571,"date":5578,"body":5579,"category":300,"tags":5580},[5576,5577],"Sarup Banskota","Arunoda Susiripala","2019-04-01","\n\n**[ZEIT 
Now](https://zeit.co/now) is a [serverless](/topics/serverless/) deployment platform that takes the burden out of configuring the cloud**. Projects deploy to Now instantly, scale automatically, and require zero supervision.\n\n### Why we created Now\n\nWe believe that deployments should be fast, simple, reliable, and cost-effective. We want to **enable developers to focus on their core applications**, and not on configuring DNS, scalability, or other infrastructure. That's why we created Now – to allow you to just push code, while we take care of the infrastructure.\n\nGit is popular among us developers – our code is often backed by a Git repository hosted on GitLab or GitHub. For most of us, our workflow involves developing features in distinct branches, which get merged into a stable master branch (or its equivalent). For most teams, the strategy around deployments follows the same style – we first stage our work on feature-specific URLs, and then finally merge them into a production URL.\n\n**We want Now to enhance the developer workflow**. By sticking close to the source code, and integrating with code collaboration tools we love, we are helping enhance the code review and release process.\n\n### Now for GitLab\n\nIn this post, we showcase our most requested feature,  [Now for GitLab](https://zeit.co/gitlab). With this first-class GitLab integration, you can automatically deploy any GitLab project, and enjoy core Now features, including global CDN, Anycast DNS, HTTPS support, and DDOS Protection.\n\n### Benefits\n\nOnce set up, **Now builds and deploys automatically, for every commit you push to GitLab**. Each such deployment gets a unique URL and we keep all your deployments alive. 
Thanks to our usage-based pricing model, you only pay for actual invocations and don't have to worry about unused deployments.\n\n![GitLab commit](https://about.gitlab.com/images/blogimages/zeit-now/gitlab-commit.png){: .medium.center}\n\n*\u003Csmall>Being able to test a unique deployment for every commit allows you to merge changes to your project with confidence.\u003C/small>*\n\n**All deployments made within a merge request are listed chronologically on its page**. They can be tested at any time, allowing your team to try out changes as progress is made, and to iron out concerns before pushing a new feature to production.\n\n![track deployments on MR](https://about.gitlab.com/images/blogimages/zeit-now/deployments-mr.png){: .medium.center}\n\n*\u003Csmall>You can track all the deployments made towards a specific feature on its merge request page.\u003C/small>*\n\n**Every merge request receives a unique URL** based on its branch name. The URL points to the latest deployment made on the merge request branch. You can share the URL with your team or even publicly, for beta testing your changes.\n\n![staging](https://about.gitlab.com/images/blogimages/zeit-now/gitlab-staging.png){: .medium.center}\n\n*\u003Csmall>The unique URL for the merge request acts as a staging link that can be passed around to anybody in the team who is interested in tracking development updates on a specific feature.\u003C/small>*\n\n**Merged MRs are automatically deployed to production**. 
Once deployed, we automatically alias your deployment to the production domain name.\n\n![alias](https://about.gitlab.com/images/blogimages/zeit-now/gitlab-alias.png){: .medium.center}\n\n*\u003Csmall>Once a merge request lands on the [default branch](https://docs.gitlab.com/ee/user/project/repository/branches/default.html), it is automatically built, deployed, and aliased to the chosen production domain names.\u003C/small>*\n\n## Getting started with Now for GitLab\n\nWe offer a **powerful free tier** which allows you to deploy a small-scale production app without requiring a credit card. To get started, visit the [**ZEIT Sign up page**](https://zeit.co/signup) and click the `Continue with GitLab` button. When GitLab requests an authorization, click `Authorize`.\n\n![sign up for Now for GitLab](https://about.gitlab.com/images/blogimages/zeit-now/sign-up-zeit-gitlab.png){: .center}\n\n*\u003Csmall>By signing up for ZEIT with GitLab, you automatically connect Now with your GitLab account, making it easier to link to your GitLab projects.\u003C/small>*\n\nOnce you complete the authorization, you can [**link any existing GitLab project**](https://zeit.co/new) with your ZEIT account, or create a new one based on our [Quick Start templates](https://zeit.co/new).\n\n![quick start templates](https://about.gitlab.com/images/blogimages/zeit-now/templates.png){: .medium.center}\n\n*\u003Csmall>The Quick Start templates save you time from setting up boilerplate code for several popular projects, such as Next.js, Vue, or Hugo.\u003C/small>*\n\nPlease note that if you already have a ZEIT account, you can set up the connection to GitLab on your [ZEIT account page](https://zeit.co/account).\n\n## Prepare your project for Now\n\nTo be able to successfully process a GitLab project, Now needs to be provided with build and deployment information. 
This information can be provided via a [`now.json` configuration file](https://zeit.co/docs/v2/deployments/configuration).\n\nFor example, if you are interested in deploying Node.js serverless code, the `now.json` file could be framed as follows:\n\n    {\n      \"name\": \"GitLab Project\",\n      \"alias\": [\"gitlab-project.now.sh\"],\n      \"builds\": [{\n        \"src\": \"index.js\",\n        \"use\": \"@now/node\"\n       }]\n    }\n\n*\u003Csmall>[now.json](https://zeit.co/docs/v2/deployments/configuration) allows us to provide information about building, deploying, and aliasing a GitLab project with Now.\u003C/small>*\n\nMore information on configuring `now.json`, including all supported options, is available on its [docs page](https://zeit.co/docs/v2/deployments/configuration). We support building and deploying many popular technologies through our open-sourced official [Builders](https://zeit.co/docs/v2/deployments/builders/overview), including Python, Rust, PHP, and Go. We welcome your contributions toward new Builders to support your favorite technology. To help with that, we also have a [guide](https://zeit.co/docs/v2/deployments/builders/developer-guide/) in place that walks through the process of creating and publishing a Builder.\n\n### Custom domain names and Instant Rollbacks\n\nWhen a GitLab merge request is merged into the [default branch](https://docs.gitlab.com/ee/user/project/repository/branches/default.html), [Now for GitLab](https://zeit.co/gitlab) instantly triggers a new deployment. As soon as that deployment completes, it is **automatically aliased to the production domain names** that were specified through the `alias` property in `now.json`.\n\nWhen you deploy with Now, **we map all your code and configuration to a single, unique URL**. 
Now only performs a new build when the underlying code receives changes.\n\nIf you trigger a revert within GitLab on the [default branch](https://docs.gitlab.com/ee/user/project/repository/branches/default.html), the code and configuration perfectly match a deployment URL Now previously had. This allows us to perform the alias with the previous URL within milliseconds, thus providing an **Instant Rollback**.\n\n### Final words\n\nOur mission at ZEIT is to make the cloud accessible to everyone. The [Now for GitLab](https://zeit.co/gitlab) integration was one of our most requested features, and we are thrilled to make it available to you.\n\nPlease give [Now for GitLab](https://zeit.co/gitlab) a try, and let us know what you think. Our Twitter is [@zeithq](https://twitter.com/zeithq).\n",[9,232],{"slug":5582,"featured":6,"template":700},"zeit-launches-now-for-gitlab","content:en-us:blog:zeit-launches-now-for-gitlab.yml","Zeit Launches Now For Gitlab","en-us/blog/zeit-launches-now-for-gitlab.yml","en-us/blog/zeit-launches-now-for-gitlab",{"_path":5588,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":5589,"content":5595,"config":5601,"_id":5603,"_type":14,"title":5604,"_source":16,"_file":5605,"_stem":5606,"_extension":19},"/en-us/blog/2019-gartner-aro-mq",{"title":5590,"description":5591,"ogTitle":5590,"ogDescription":5591,"noIndex":6,"ogImage":5592,"ogUrl":5593,"ogSiteName":685,"ogType":686,"canonicalUrls":5593,"schema":5594},"Gartner names GitLab challenger in release orchestration","We're happy to share that GitLab is a Challenger in Gartner's 2019 ARO Magic Quadrant","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/2019-gartner-aro-mq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019\",\n 
       \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2020-01-16\",\n      }",{"title":5596,"description":5591,"authors":5597,"heroImage":5592,"date":5598,"body":5599,"category":1062,"tags":5600},"GitLab named Challenger in Gartner Magic Quadrant for Application Release Orchestration 2019",[1445],"2020-01-16","\n\nWe are pleased to share that recently GitLab was named a Challenger in the Gartner 2019 Magic Quadrant for Application Release Orchestration. ARO is a relatively new area for GitLab, but we believe our placement as a Challenger compared to last year’s placement as a Niche Player reflects the work we’ve put in and rapid progress we’ve made.\n\nYou can visit our [ARO MQ commentary page](/analysts/gartner-aro19/) to read our thoughts on the ARO markets and this report along with the lessons we learn participating. We’ll be adding links to this page to our roadmap items that show our plans for continued improvement. \n\nGartner, Magic Quadrant for Application Release Orchestration, 7 October 2019, Daniel Betts, Chris Saunderson, Hassan Ennaciri, Christopher Little Gartner does not endorse any vendor, product or service depicted in its research publications, and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s research organization and should not be construed as statements of fact. Gartner disclaims all warranties, express or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose. 
\n{: .note}\n\nImage by \u003Ca href=\"https://pixabay.com/users/pisauikan-4552082/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">pisauikan\u003C/a> from \u003Ca href=\"https://pixabay.com/?utm_source=link-attribution&amp;utm_medium=referral&amp;utm_campaign=image&amp;utm_content=2682641\">Pixabay\u003C/a>\n{: .note}\n",[721,1064,1062,9],{"slug":5602,"featured":6,"template":700},"2019-gartner-aro-mq","content:en-us:blog:2019-gartner-aro-mq.yml","2019 Gartner Aro Mq","en-us/blog/2019-gartner-aro-mq.yml","en-us/blog/2019-gartner-aro-mq",28,[678,705,728,749,770,793,813,837,858],1758747446209]