[{"data":1,"prerenderedAt":4288},["ShallowReactive",2],{"/en-us/blog/tags/workflow/":3,"navigation-en-us":19,"banner-en-us":449,"footer-en-us":466,"workflow-tag-page-en-us":676},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":10,"_id":12,"_type":13,"title":14,"_source":15,"_file":16,"_stem":17,"_extension":18},"/en-us/blog/tags/workflow","tags",false,"",{"tag":9,"tagSlug":9},"workflow",{"template":11},"BlogTag","content:en-us:blog:tags:workflow.yml","yaml","Workflow","content","en-us/blog/tags/workflow.yml","en-us/blog/tags/workflow","yml",{"_path":20,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":22,"_id":445,"_type":13,"title":446,"_source":15,"_file":447,"_stem":448,"_extension":18},"/shared/en-us/main-navigation","en-us",{"logo":23,"freeTrial":28,"sales":33,"login":38,"items":43,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":24},{"href":25,"dataGaName":26,"dataGaLocation":27},"/","gitlab logo","header",{"text":29,"config":30},"Get free trial",{"href":31,"dataGaName":32,"dataGaLocation":27},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":34,"config":35},"Talk to sales",{"href":36,"dataGaName":37,"dataGaLocation":27},"/sales/","sales",{"text":39,"config":40},"Sign in",{"href":41,"dataGaName":42,"dataGaLocation":27},"https://gitlab.com/users/sign_in/","sign in",[44,88,186,191,297,357],{"text":45,"config":46,"cards":48,"footer":71},"Platform",{"dataNavLevelOne":47},"platform",[49,55,63],{"title":45,"description":50,"link":51},"The most comprehensive AI-powered DevSecOps Platform",{"text":52,"config":53},"Explore our Platform",{"href":54,"dataGaName":47,"dataGaLocation":27},"/platform/",{"title":56,"description":57,"link":58},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":59,"config":60},"Meet GitLab Duo",{"href":61,"dataGaName":62,"dataGaLocation":27},"/gitlab-duo/","gitlab duo 
ai",{"title":64,"description":65,"link":66},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":67,"config":68},"Learn more",{"href":69,"dataGaName":70,"dataGaLocation":27},"/why-gitlab/","why gitlab",{"title":72,"items":73},"Get started with",[74,79,84],{"text":75,"config":76},"Platform Engineering",{"href":77,"dataGaName":78,"dataGaLocation":27},"/solutions/platform-engineering/","platform engineering",{"text":80,"config":81},"Developer Experience",{"href":82,"dataGaName":83,"dataGaLocation":27},"/developer-experience/","Developer experience",{"text":85,"config":86},"MLOps",{"href":87,"dataGaName":85,"dataGaLocation":27},"/topics/devops/the-role-of-ai-in-devops/",{"text":89,"left":90,"config":91,"link":93,"lists":97,"footer":168},"Product",true,{"dataNavLevelOne":92},"solutions",{"text":94,"config":95},"View all Solutions",{"href":96,"dataGaName":92,"dataGaLocation":27},"/solutions/",[98,123,147],{"title":99,"description":100,"link":101,"items":106},"Automation","CI/CD and automation to accelerate deployment",{"config":102},{"icon":103,"href":104,"dataGaName":105,"dataGaLocation":27},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[107,111,115,119],{"text":108,"config":109},"CI/CD",{"href":110,"dataGaLocation":27,"dataGaName":108},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":61,"dataGaLocation":27,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code Management",{"href":118,"dataGaLocation":27,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":104,"dataGaLocation":27,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":27,"icon":130},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[132,137,142],{"text":133,"config":134},"Application Security Testing",{"href":135,"dataGaName":136,"dataGaLocation":27},"/solutions/application-security-testing/","Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":27,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":27},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":27},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":27,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":27,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":27,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":27,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":27,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":27,"dataGaName":185},"/solutions/public-sector/","public sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":27,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all 
resources",{"href":198,"dataGaName":194,"dataGaLocation":27},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":27},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":27},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":27,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":27},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":27},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":27},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":27},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":27},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":27},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":27},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab 
Services",{"href":262,"dataGaName":263,"dataGaLocation":27},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":27},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":27},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":27},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":27},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":27},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":27},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":27},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":27},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":27},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":27},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":27},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":27},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust 
Center",{"href":340,"dataGaName":341,"dataGaLocation":27},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":27},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":27},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":27},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":34,"config":364},{"href":36,"dataGaName":365,"dataGaLocation":27},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":27},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":27},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":41,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":56,"config":389},{"href":61,"dataGaName":56,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":108,"config":395},{"href":110,"dataGaName":108,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why 
GitLab?",{"href":69,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":32,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":61,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":13,"_source":15,"_file":464,"_stem":465,"_extension":18},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the 
Beta",{"href":455,"dataGaName":456,"dataGaLocation":27},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":672,"_type":13,"title":673,"_source":15,"_file":674,"_stem":675,"_extension":18},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":664},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,515,571,600,634],{"title":45,"links":493,"subMenu":498},[494],{"text":495,"config":496},"DevSecOps platform",{"href":54,"dataGaName":497,"dataGaLocation":475},"devsecops platform",[499],{"title":187,"links":500},[501,505,510],{"text":502,"config":503},"View plans",{"href":189,"dataGaName":504,"dataGaLocation":475},"view plans",{"text":506,"config":507},"Why 
Premium?",{"href":508,"dataGaName":509,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":511,"config":512},"Why Ultimate?",{"href":513,"dataGaName":514,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",{"title":516,"links":517},"Solutions",[518,523,525,527,532,537,541,544,548,553,555,558,561,566],{"text":519,"config":520},"Digital transformation",{"href":521,"dataGaName":522,"dataGaLocation":475},"/topics/digital-transformation/","digital transformation",{"text":133,"config":524},{"href":135,"dataGaName":133,"dataGaLocation":475},{"text":122,"config":526},{"href":104,"dataGaName":105,"dataGaLocation":475},{"text":528,"config":529},"Agile development",{"href":530,"dataGaName":531,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":533,"config":534},"Cloud transformation",{"href":535,"dataGaName":536,"dataGaLocation":475},"/topics/cloud-native/","cloud transformation",{"text":538,"config":539},"SCM",{"href":118,"dataGaName":540,"dataGaLocation":475},"source code management",{"text":108,"config":542},{"href":110,"dataGaName":543,"dataGaLocation":475},"continuous integration & delivery",{"text":545,"config":546},"Value stream management",{"href":162,"dataGaName":547,"dataGaLocation":475},"value stream management",{"text":549,"config":550},"GitOps",{"href":551,"dataGaName":552,"dataGaLocation":475},"/solutions/gitops/","gitops",{"text":172,"config":554},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":556,"config":557},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":559,"config":560},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":562,"config":563},"Education",{"href":564,"dataGaName":565,"dataGaLocation":475},"/solutions/education/","education",{"text":567,"config":568},"Financial services",{"href":569,"dataGaName":570,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":572},[573,575,577,579,582,584,586,588,590,592,594,596,598],{"text":204,"config":574},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":576},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":578},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":580},{"href":221,"dataGaName":581,"dataGaLocation":475},"docs",{"text":242,"config":583},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":585},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":587},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":589},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":591},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":593},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":595},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":597},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":599},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":601},[602,604,606,608,610,612,614,618,623,625,627,629],{"text":305,"config":603},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":605},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":607},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":609},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":611},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":613},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":615,"config":616},"Sustainability",{"href":617,"dataGaName":615,"dataGaLocation":475},"/sustainability/",{"text":619,"config":620},"Diversity, inclusion and belonging (DIB)",{"href":621,"dataGaName":622,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":338,"config":624},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":626},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":628},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":630,"config":631},"Modern Slavery Transparency Statement",{"href":632,"dataGaName":633,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":635,"links":636},"Contact Us",[637,640,642,644,649,654,659],{"text":638,"config":639},"Contact an expert",{"href":36,"dataGaName":37,"dataGaLocation":475},{"text":367,"config":641},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":643},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":645,"config":646},"Status",{"href":647,"dataGaName":648,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":650,"config":651},"Terms of use",{"href":652,"dataGaName":653,"dataGaLocation":475},"/terms/","terms of use",{"text":655,"config":656},"Privacy statement",{"href":657,"dataGaName":658,"dataGaLocation":475},"/privacy/","privacy statement",{"text":660,"config":661},"Cookie preferences",{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"cookie preferences","ot-sdk-btn",{"items":665},[666,668,670],{"text":650,"config":667},{"href":652,"dataGaName":653,"dataGaLocation":475},{"text":655,"config":669},{"href":657,"dataGaName":658,"dataGaLocation":475},{"text":660,"config":671},{"dataGaName":662,"dataGaLocation":475,"id":663,"isOneTrustButton":90},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":677,"featuredPost":4267,"totalPagesCount":4286,"initialPosts":4287},[678,704,729,751,774,798,819,842,862,881,900,919,939,960,981,1000,1021,1042,1060,1081,1103,1123,1144,1166,1186,1206,1226,1248,1267,1288,1308,1330,1350,1371,1392,1413,1434,1453,1473,1492,1514,1533,1553,1572,1592,1611,1631,1650,1670,1691,1710,1729,1749,1768,1788,1808,1828,1849,1870,1891,1911,1933,1953,1973,1992,2011,2032,2051,2070,2088,2106,2127,2148,2168,2187,2207,2228,2247,2267,2286,2305,2325,2344,2364,2383,2402,2422,2442,2461,2481,2502,2522,2541,2560,2580,2598,2615,2633,2652,2671,2691,2711,2731,2751,2770,2787,2807,2826,2846,2864,2884,2904,2924,2944,2963,2981,3001,3021,3041,3059,3078,3098,3118,3137,3155,3175,3194,3212,3232,3252,3270,3289,3308,3328,3348,3367,3387,3407,3428,3448,3467,3485,3503,3522,3541,3561,3580,3600,3620,3637,3656,3675,3695,3714,3732,3751,3771,3789,3808,3827,3846,3866,3886,3905,3925,3944,3964,3984,4002,4021,4041,4062,4081,4101,4120,4139,4158,4177,4195,4214,4233,4252],{"_path":679,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":680,"content":688,"config":697,"_id":700,"_type":13,"title":701,"_source":15,"_file":702,"_stem":703,"_extension":18},"/en-us/blog/16-ways-to-get-the-most-out-of-software-documentation",{"title":681,"description":682,"ogTitle":681,"ogDescription":682,"noIndex":6,"ogImage":683,"ogUrl":684,"ogSiteName":685,"ogType":686,"canonicalUrls":684,"schema":687},"How to get the most out of software documentation","Want to get even more mileage out of your DevOps platform? Better software documentation is the answer. 
Here are tips to help you get started.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668339/Blog/Hero%20Images/a-tale-of-two-editors.jpg","https://about.gitlab.com/blog/16-ways-to-get-the-most-out-of-software-documentation","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get the most out of software documentation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2022-01-11\",\n      }",{"title":681,"description":682,"authors":689,"heroImage":683,"date":691,"body":692,"category":693,"tags":694},[690],"Sharon Gaudin","2022-01-11","\n\nIt’s not a glamorous part of a DevOps platform, but software documentation is easy, sometimes hands-free, and, if done correctly, can help speed up development and deployment. Here are some tips to refresh your software documentation practice.\n\n## Defining documentation\n\nSoftware documentation – which includes everything from manuals to system and design requirements, change lists, code comments, and alert records – is a way to unify efforts between projects and DevOps teams, and to share specialized knowledge and guidance. It’s also a way to standardize practices and benchmark metrics. There’s a direct correlation between creating clear, comprehensive, searchable, up-to-date, and well-organized documents and a DevOps team’s success.\n\nNeed proof? 
According to the [Accelerate State of DevOps 2021 report](https://gitlab.com/gitlab-com/www-gitlab-com/uploads/069ee8e2ee6af463cf0aafcd89eda33e/state-of-devops-2021.pdf) from DORA, the DevOps Research and Assessment team at Google, DevOps teams with solid documentation practices are 2.4 times more likely to meet or exceed their reliability targets, 3.8 times more likely to implement security practices, and 2.5 times more likely to fully leverage the cloud.\n\nMaking sure you have strong documentation actually is one of the six suggestions the DORA report gave DevOps professionals who [want to become elite team performers](/blog/how-to-make-your-devops-team-elite-performers/).\n\nAs you work on a [DevOps platform](/solutions/devops-platform/) and create new efficiencies and processes, you will want to document them so you can carry them forward. No continually reinventing the wheel for you.\n\n### Tips for creating solid software documentation\n\nSo how do you go about building good documentation? Here are some basic steps to follow:\n\n- You need to decide who is responsible for the documentation. What works best for your team and your organization? Does the project need a [technical writer](/handbook/product/ux/technical-writing/) or can one of your developers handle it? Give one person or just a few people ownership of documentation. You’re more likely to have quality software documentation when someone has clear responsibility and no one can pass the buck. \n\n- Don’t forget about incorporating user experience into your documentation. It will give you a different view on use cases and experiences and enable readers to have their success moment [more quickly](https://docs.gitlab.com/ee/ci/quick_start/). \n\n- Think about the security requirements for your software. For instance, when a project uses network communication over public transport, does it provide secure communication with TLS and/or https? 
Inform users about [support policies for security releases](https://docs.gitlab.com/ee/policy/maintenance.html), allowing to plan accordingly for upgrades and maintenance windows. Additionally, what measurements do you need to take to make sure it complies with company security policies? Note that information in your documentation.\n\n- Use your documentation to explain technical decisions and share insights into [reference architectures](https://docs.gitlab.com/ee/administration/reference_architectures/). When debugging a problem, it is helpful to learn about the decisions, and also have ‘get help’ and [‘troubleshooting’ sections](https://docs.gitlab.com/ee/ci/troubleshooting.html) in your documentation.\n\n- Provide details about issues you faced with the project and how you worked them out. Make sure the details are explained so that others can easily understand them. Add URLs to issues or epics into your documentation to allow readers to follow, for example the [version history for product features](https://docs.gitlab.com/ee/development/documentation/styleguide/#version-text-in-the-version-history) in the GitLab documentation.\n\n- There should be specific rules about how to change, expand and update documentation. Create [documentation style guides](https://docs.gitlab.com/ee/development/documentation/styleguide/), including requirements, examples, use cases and specifications for writing for a global audience. If changes are made creating inconsistent data formats, it can be more difficult to organize and search documents.\n\n- Don’t just document at the end of a project. It should be done continuously throughout the development and deployment lifecycle – from planning through monitoring and feedback. 
(We’ll give you more tips about this below.)\n\n- Give people who are responsible for documentation the [training](/handbook/product/ux/technical-writing/fundamentals/) they need in how to collect data, write, organize, and maintain it.\n\n- Make sure the [people responsible for documentation](/handbook/product/ux/technical-writing/#designated-technical-writers) are included in all aspects of the DevOps lifecycle. Bring them into planning, design, and testing meetings. They can’t write about or collect information about what they don’t know is happening.\n\n- Make use of data created by automated processes. (Again, there’s more information on this below.)\n\n- Make sure your documentation isn’t just paraphrasing what the source code flow does. Explain the “why” as well as the use case for the project. Dependending on the size and users, your audiences may differ, and the introduction needs an [overview with different navigation routes](https://docs.gitlab.com/ee/index.html).\n\n- There’s no one right way to handle documentation. What you need for documentation may vary depending on things like the size and nature of your organization, the scope of your software projects, and compliance issues. A hospital or financial institution’s documentation needs might differ from those of a small, private company.\n\n## Continuous software documentation\n\nMuch like there are continuous integration and deployment, there also can be continuous documentation. You can make the automated processes on a DevOps platform do a good chunk of your documentation work by having them capture key information throughout the DevOps lifecycle and funnel it into your documentation stores. Make it part of your development workflow by approaching documentation with a DevOps mindset. Software documentation is easier and more helpful when it’s done continuously.\n\nYou can leverage existing tools to generate, convert and present documentation. 
GitLab provides an extensive REST API, which allows to [update the wiki](https://docs.gitlab.com/ee/api/wikis.html) programmantically, or modify a Markdown file in the Git repository from your CI/CD pipelines. If you want to present the documentation on a website, you can use [MkDocs](https://www.mkdocs.org/) to generate a static documentation website [served with GitLab Pages](https://gitlab.com/pages/mkdocs) for example. Code documentation with [Doxygen](https://www.doxygen.nl/manual/docblocks.html) can be generated in the same way as a [website reference documentation](https://gitlab.com/pages/doxygen). \n\n### Tips to make documentation easier and more continuous\n\n- The DevOps platform’s automated systems, which govern processes and monitor everything from system to software configurations, generate logs that can create a real-time, ongoing stream of documentation.\n\n- Scripts and configuration files that control automated processes, like testing, hold important configuration data that can be fed into documentation.\n\n- Issue and alert logs, which generally contain information about problems, can be automatically documented. \n\n- Integrated [Observability](/direction/monitor/) keeps track of performance and availability of the software and also can add to documentation by providing access to metrics, traces and log dashboards and panels.  \n\nThese are just a few ways to automatically feed your continuous documentation operation. Sure, there are forms of documentation that will need some hands-on, but there are a lot that can be generated as part of the ongoing process. The data is there, so make good use of it.\n\n“Good documentation is foundational for successfully implementing DevOps capabilities,” the DORA report noted. 
“Teams with high quality documentation are better able to implement technical practices and perform better as a whole… From security to testing, documentation is a key way to share specialized knowledge and guidance both between these specialized sub-teams and with the wider team.” \n\n_[Michael Friedrich](/company/team/#dnsmichi), Senior Developer Evangelist, contributed to this blog post._\n","devsecops",[695,9,696],"DevOps platform","collaboration",{"slug":698,"featured":6,"template":699},"16-ways-to-get-the-most-out-of-software-documentation","BlogPost","content:en-us:blog:16-ways-to-get-the-most-out-of-software-documentation.yml","16 Ways To Get The Most Out Of Software Documentation","en-us/blog/16-ways-to-get-the-most-out-of-software-documentation.yml","en-us/blog/16-ways-to-get-the-most-out-of-software-documentation",{"_path":705,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":706,"content":712,"config":723,"_id":725,"_type":13,"title":726,"_source":15,"_file":727,"_stem":728,"_extension":18},"/en-us/blog/2018-global-developer-report",{"title":707,"description":708,"ogTitle":707,"ogDescription":708,"noIndex":6,"ogImage":709,"ogUrl":710,"ogSiteName":685,"ogType":686,"canonicalUrls":710,"schema":711},"Global Developer Report - 2018 for Open Source & DevOps","We surveyed over 5,000 software professionals to examine current attitudes and perception of the state of culture, workflow, and tooling within IT organizations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/2018-global-developer-report","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Global Developer Report confirms 2018 is the year for open source and DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2018-03-07\",\n      
}",{"title":713,"description":708,"authors":714,"heroImage":709,"date":716,"body":717,"category":718,"tags":719},"Global Developer Report confirms 2018 is the year for open source and DevOps",[715],"Erica Lindberg","2018-03-07","\n_Our [2022 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) is out now! Learn the latest in DevOps insights from over 5,000 DevOps professionals._\n\nFrom the junior developer with just a handful of years’ experience to the software professional who’s been in the game for decades, we set out to see how the people behind the software are dealing with a rapidly changing technology landscape. This year’s survey reveals that unclear direction is a developer’s greatest challenge, IT managers are investing the most in continuous integration and delivery, and nearly all agree that the importance of open source cannot be overstated.\n\n\u003C!--more -->\n\nThe focus of [GitLab’s 2018 Global Developer survey](/developer-survey/previous/2018/) was to understand developers’ attitudes toward their workplace, uncover disparities between developers and their management, and benchmark the state of culture, workflow, and tooling within IT organizations. We asked a broad set of questions covering everything from developers’ opinions on their teams’ ability to collaborate and succeed at work to their preferences on workflow methodology and tooling.\n\n\u003Cdiv style=\"text-align: center\"> 🎙\u003Cstrong>\u003Ca href=\"https://webinars.devops.com/top-5-takeaways-from-the-2018-global-developer-survey\"> Join us March 29 for a live discussion with Alan Shimel of DevOps.com on the top 5 takeaways from the report\u003C/a> \u003C/strong> 🎙 ️\u003C/div>\n\n## Developer satisfaction\n\nWe found that the majority of developers are satisfied with the conditions of their workplace, and managers should focus on improving the planning and testing phases of the development lifecycle. 
We also found that IT management is more optimistic in their perception of overall workplace satisfaction with roughly 10 percent more respondents agreeing their team is set up to succeed, and that project requirements and deadlines are set up front.\n\n\u003Cimg src=\"/images/blogimages/2018-developer-report-stats_2x.jpg\" alt=\"2018 Developer Report\" style=\"width: 900px;\"/>\n\nDelays during the planning phase emerged as a top challenge for all respondents and unclear direction remains the greatest challenge to getting work done for developers.\n\n## DevOps\n\nCommitment to and demand for DevOps is growing, despite challenges posed by outmoded tooling and cultural resistance to change. Adoption is still in early stages, with 23 percent identifying DevOps as their development methodology, but this is sure to increase with IT management naming it as one of their top three areas for technology investment in 2018. The tide of developer opinion is following suit: we found that the majority of developers agree that a DevOps workflow saves valuable time during the development process. Teams currently practicing DevOps confirm the productivity gains – high performers, who told us they deploy their code on demand, and who estimated that they spend 50 percent or more of their time on new work, report having a clear DevOps culture at rates more than double that of lower-performing teams.\n\n## Open source\n\nOpen source projects like [Kubernetes](/blog/containers-kubernetes-basics/) and [CoreOS](/blog/coreos-acquisition/) have gained a lot of recent attention and this year’s survey underscores the value of creating software in the open. 92 percent of total respondents agree that open source tools are important to software innovation and nearly 50 percent report that most of their tools are open source.\n\n## About the 2018 survey\n\nGitLab surveyed 5,296 software professionals of varying backgrounds and industries around the world. 
The margin of error is two percent, assuming a population size of 21 million software professionals and 99 percent confidence level.\n\n## Methodology\n\nWe launched this Global Developer Survey on November 17, 2017, collecting responses\nuntil December 18, 2017. During that time, we promoted the survey primarily on GitLab’s\nsocial media channels and newsletter. In order to correct for the gender imbalance\ndeveloping in our survey sample, we made an extra push via Twitter on December 5 to encourage\nwomen involved in the software development lifecycle to take the survey. By the end of the open\nperiod, we achieved approximately 25 percent female respondents, the same percentage of women who currently\nhold computing roles, according to [NCWIT](https://www.ncwit.org/sites/default/files/resources/womenintech_facts_fullreport_05132016.pdf).\n\n| Frequently asked questions |\n| -------- | -------- |\n| **How can I access the report?**   | You can view the complete report [here](/developer-survey/).   |\n| **Are the raw results publicly available?**  | Yes, you can view the raw data [here](https://www.surveymonkey.com/results/SM-G3S6S63P8/).   |\n| **Did only GitLab users take the survey?** | No, it was open to all who work in software production. You can view the survey demographics [here](/developer-survey/).  |\n| **How can I ask questions or give feedback about the survey and results?** | You can direct questions or comments about the survey to [surveys@gitlab.com](mailto:surveys@gitlab.com). |\n| **I’d like to participate in the next survey. Can I sign up for alerts?** | The best way to receive news about the Global Developer Survey is to sign up for our bi-weekly newsletter – you can do that below or visit our [Subscription Center](https://page.gitlab.com/SubscriptionCenter.html). 
|\n","insights",[720,721,722,9],"developer survey","open source","DevOps",{"slug":724,"featured":6,"template":699},"2018-global-developer-report","content:en-us:blog:2018-global-developer-report.yml","2018 Global Developer Report","en-us/blog/2018-global-developer-report.yml","en-us/blog/2018-global-developer-report",{"_path":730,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":731,"content":737,"config":745,"_id":747,"_type":13,"title":748,"_source":15,"_file":749,"_stem":750,"_extension":18},"/en-us/blog/4-ways-to-use-gitlab-issue-boards",{"title":732,"description":733,"ogTitle":732,"ogDescription":733,"noIndex":6,"ogImage":734,"ogUrl":735,"ogSiteName":685,"ogType":686,"canonicalUrls":735,"schema":736},"4 ways to use GitLab Issue Boards","By leveraging the power of labels, GitLab Issue Boards can be easily customized to support any workflow. Here are four examples.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671759/Blog/Hero%20Images/gitlab-issue-board-cover.png","https://about.gitlab.com/blog/4-ways-to-use-gitlab-issue-boards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 ways to use GitLab Issue Boards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-08-02\",\n      }",{"title":732,"description":733,"authors":738,"heroImage":734,"date":740,"body":741,"category":300,"tags":742},[739],"Victor Wu","2018-08-02","\nThere are many different ways to build software and run projects. When we began\nto build portfolio and project management tools within GitLab, we asked\nourselves, \"How do you build tools to support a diverse array of workflows\nwithout crippling users with complexity?\" Our conclusion? Build fewer, but more\nflexible tools. 
GitLab's built-in [issue boards](/stages-devops-lifecycle/issueboard/) is a\ngreat example of how choosing to build a single, versatile tool can significantly\nreduce complexity and tooling maintenance for the user without limiting its\ncapabilities.\n\n> How do you build tools to support a diverse array of workflows without crippling\nusers with complexity? Build fewer, but more flexible tools.\n\nManaging projects and software is inherently complex, but the tools don't have to be.\nRegardless of how many projects, people, or products you're managing, getting the\nvisibility you need to keep things running smoothly should be the easy part.\n\n## GitLab issues board\n\nA GitLab Issues Board is a simplified approach to a complex problem. We built on\ntop of GitLab's existing issue-tracking functionality and leverage the power of\nGitLab issue labels by utilizing them as lists on a Kanban board. You can construct different\nviews of your issue board while maintaining the same filtering and sorting abilities\nyou see across the issue tracker. You can create multiple boards to capture every\nlayer of visibility you need, and define the scope of the board by milestone,\nlabels, assignee, and weight.\n\nAn Issue Board is based on its project's label structure, therefore, it applies\nthe same descriptive labels to indicate placement on the board, keeping\nconsistency throughout the entire development lifecycle. And, you can click\ndirectly into an issue from the board to get started on your work.\n\n## 4 uses for the GitLab issues board\n\nAn Issue Board shows you what issues your team is working on, who is assigned to\neach, and where in the workflow those issues are.\n\n### 1. 
Workflow tracking with GitLab issues\n\nWhether you are a project manager trying to stay on top of a project's status, or\na manager of a product who needs to report on progress, workflow visibility is critical.\n\nTo create a workflow issue board, simply create labels for each stage of your workflow\nand add them as lists on a board. Once you've labeled an issue, it will automatically\nappear on the list. When the issue is ready to move on to the next stage, simply drag\nand drop it into the next list. You can also update the labels directly in the issue\nand your changes will automatically appear on the board.\n\n![Using an issue board for workflow tracking](https://about.gitlab.com/images/blogimages/workflow.png){: .shadow.large.center}\n\nFor example, your workflow might look something like this:\n\n1. Development\n1. Design\n1. Review\n1. Test\n1. Deploy\n\nTo see this on a board, simply create a label for each stage. Create a new board\nand add a list for each stage. You can drag and drop lists to put them in the\ndesired order.\n\n### 2. Cross-functional planning with different categories\n\nSimilar to the workflow view, you can create a GitLab issues board based on categories.\n\nWhen working on a particular product or feature, you might want a high-level view\nof what each team is working on. You might create a label and a list for each team,\nor, perhaps you want to see what's being worked on across many projects or product lines.\n\n![Using an Issue Board for planning with categories](https://about.gitlab.com/images/blogimages/categories.png){: .shadow.large.center}\n\nIn the example above, we have three label lists: `time tracking`, `portfolio management`,\nand `description templates`. Each list represents a particular product area. So\nwith this one board, you'll be able to see what open issues belong to those three\nareas. 
If you further scope the board to a particular milestone, you'll be able\nto see which of those issues are targeted to be worked on in that milestone.\n\nThis is just one example of how you can use an issue board for planning your work.\nAgain, because the issue board is created using labels, your options are unlimited!\n\n### 3. Team visibility with assignee lists\n\nYou can easily monitor what has been assigned to individuals on your team with\nassignee lists. Assignee lists show issues that are assigned to a specific user,\nand you can create a board to display a list for everyone on your team for a\nquick view of who is working on what.\n\n![Example assignee issue board view](https://about.gitlab.com/images/blogimages/team-board.png){: .shadow.large.center}\n\n### 4. GitLab Issues and milestone planning\n\nAs part of our project management capabilities, we've built in [milestones](https://docs.gitlab.com/ee/user/project/milestones/).\nMilestones are used to track issues and merge requests associated with a specific goal\nto be accomplished within a specific time frame.\n\nSimilar to our assignee lists, you will be able to quickly create a milestone view directly from your board. This is particularly\nuseful for those leveraging an [Agile workflow](/solutions/agile-delivery/). With the milestone list, you can\neasily move issues (stories) between different milestones (i.e. sprints, iterations).\n\n| Adding a milestone list | Milestone list |\n|-------------------------|----------------|\n| ![Create milestone list](https://about.gitlab.com/images/blogimages/create-milestone-list.png){: .shadow} | ![Milestone list example](https://about.gitlab.com/images/blogimages/milestone-list-issue-board.png){: .shadow} |\n\nOf course, this is just the tip of the iceberg. 
Stay tuned for our next post on\nusing GitLab for [portfolio planning and management](/blog/gitlab-for-agile-portfolio-planning-project-management/) where we will cover a few of our other favorite GitLab features like [Epics](https://docs.gitlab.com/ee/user/group/epics/), [Roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/), and [Milestones](https://docs.gitlab.com/ee/user/project/milestones/).\n\nUpdate as of June 26 2020: Removed the \"Coming Soon\" label on Milestones since it has been released as of 11.2",[9,743,744],"production","agile",{"slug":746,"featured":6,"template":699},"4-ways-to-use-gitlab-issue-boards","content:en-us:blog:4-ways-to-use-gitlab-issue-boards.yml","4 Ways To Use Gitlab Issue Boards","en-us/blog/4-ways-to-use-gitlab-issue-boards.yml","en-us/blog/4-ways-to-use-gitlab-issue-boards",{"_path":752,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":753,"content":759,"config":768,"_id":770,"_type":13,"title":771,"_source":15,"_file":772,"_stem":773,"_extension":18},"/en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"title":754,"description":755,"ogTitle":754,"ogDescription":755,"noIndex":6,"ogImage":756,"ogUrl":757,"ogSiteName":685,"ogType":686,"canonicalUrls":757,"schema":758},"5 ways GitLab Duo Chat AI can supercharge product management","Discover how to transform all aspects of product management, boosting efficiency and improving decision-making. 
Learn practical tips for leveraging AI throughout your PM workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666405/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B__1_.png","https://about.gitlab.com/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways GitLab Duo Chat AI can supercharge product management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-09-25\",\n      }",{"title":754,"description":755,"authors":760,"heroImage":756,"date":762,"body":763,"category":764,"tags":765},[761],"Tim Rizzi","2024-09-25","As a product manager at GitLab, I constantly seek ways to enhance my productivity and decision-making. Recently, I discovered an unexpected ally in [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/). Let me share how this AI-powered assistant has transformed my approach to product management.\n\n## The daily PM challenges\n\nLike many PMs, I juggle many daily tasks — from summarizing issues and merge requests to crafting detailed product specs and investment cases. The sheer volume of writing and analysis required was overwhelming, and I worried about potential cognitive biases influencing my work.\n\nTo address these challenges, I needed to:\n\n1. Increase my efficiency in handling documentation tasks.\n2. Enhance the quality and objectivity of my product decisions.\n3. Improve my communication with various stakeholders.\n\n## Leveraging GitLab Duo Chat\n\nI decided to experiment with GitLab Duo Chat as a support tool for my daily PM tasks. Here's how I incorporated it into my workflow, with real examples:\n\n### 1. 
Issue creation and refinement\n\nI was tasked with creating an issue for a new feature that would enhance the Conan repository by adding [revision support](https://gitlab.com/gitlab-org/gitlab/-/issues/479437). To start, I prompted Chat: \"Can you create an issue to add support for downloading Conan revisions to the GitLab package registry? Think about the product's value from a C++ developer and a platform engineer perspective.\"\n\nGitLab Duo provided a comprehensive draft issue, including:\n\n* a clear description of the feature\n* value propositions for both C++ developers and platform engineers\n* implementation details\n* acceptance criteria\n* related links and labels\n\nThis gave me a solid starting point with all the necessary sections, which I refined and customized. Instead of spending an hour writing that issue, I spent more time thinking about how this feature fits within the broader GitLab strategy.\n\n### 2. Summarizing and reviewing\n\nI often ask GitLab Duo to summarize lengthy merge requests or complicated epics. For instance, when reviewing the epic for [protected container images](https://gitlab.com/groups/gitlab-org/-/epics/9825), I asked GitLab Duo to summarize the key changes and their value for GitLab customers and users from a PM perspective.\n\nGitLab Duo provided a detailed summary, highlighting the following:\n\n* enhanced security and compliance features\n* improved governance and control\n* streamlined operations\n* increased confidence in CI/CD pipelines\n* better alignment with DevOps practices\n* customizable security posture\n* improved audibility\n* cost-effective security solution\n\nThis summary helped me quickly grasp and share the key points with my team more effectively.\n\n### 3. 
Project status updates\n\nTo get a quick overview of a project's status, I asked GitLab Duo to provide an update \"like a hyper-focused project manager.\" The response included:\n\n* overall progress\n* completed items\n* in-progress tasks\n* next steps\n* timeline\n* risks and issues\n* stakeholder input\n* action items\n* key performance indicators\n* communication plans\n\nThis structured overview allowed me to quickly assess the project's status and identify areas needing attention.\n\n### 4. Value proposition and metric analysis\n\nWhen I needed to articulate the value of measuring monthly active users and storage costs for the Package stage, I asked GitLab Duo for help. The response provided a comprehensive explanation, covering:\n\n* user engagement and adoption insights\n* feature prioritization\n* capacity planning\n* business model optimization\n* customer success indicators\n* cost management\n* competitive positioning\n* product health assessment\n* ROI calculation\n* future planning considerations\n\nThis well-structured response gave me more than enough content and helped me better articulate the value of these essential metrics.\n\n### 5. Challenging cognitive biases\n\nTo reveal blind spots in my thinking, I often ask GitLab Duo to answer in specific personas, such as:\n\n* a hyper-focused project manager\n* a frustrated customer\n* a developer who doesn't have time to read issues\n* a product leader who demands excellence\n\nFor example, when I created an investment case for GitLab Package, I asked GitLab Duo to review it as a hypercritical CEO. This perspective helped me consider including financial projections and competitive analysis in my proposal, which I had initially overlooked.\n\n## A more efficient and effective PM\n\nThe impact of integrating GitLab Duo Chat into my workflow has been significant:\n\n1. **Increased productivity:** Tasks that used to take hours now often take minutes. 
Creating the initial draft of the Conan issue took about five minutes with GitLab Duo, compared to the usual 30-45 minutes I'd spend starting from scratch.\n2. **Enhanced quality:** The initial drafts produced with GitLab Duo's help are more comprehensive and structured. For the protected container images project, GitLab Duo's input helped me more effectively summarize the value of my go-to-market strategy and the project's current status.\n3. **Improved decision-making:** I've created more robust, well-rounded proposals using GitLab Duo to challenge my assumptions. The critique of my investment case led to a more thorough cost-benefit analysis.\n4. **Continuous improvement:** The feedback loop of writing, getting GitLab Duo's input, and refining has helped me improve my writing and analytical skills. My first drafts are becoming stronger, even without GitLab Duo's assistance.\n\n## A new era of AI-assisted product management\n\nWhile GitLab Duo Chat hasn't replaced my role as a PM, it has become an invaluable tool in my arsenal. It's helped me be more efficient, thorough, and objective. As AI assistants like GitLab Duo continue to evolve, I'm excited about the potential for further enhancing our product management practices.\n\nHowever, it's crucial to remember that GitLab Duo is a tool, not a replacement for human insight and creativity. The best results come from combining GitLab Duo's capabilities with our expertise and understanding of our unique business context.\n\n## Try GitLab Duo\n\nI encourage fellow PMs to explore how AI assistants like GitLab Duo Chat can augment their work. Here are some steps you can take:\n\n1. **Start small:** Use GitLab Duo for simple tasks like summarizing issues or drafting initial proposals.\n2. **Experiment with personas:** Ask GitLab Duo to review your work from different perspectives to uncover blind spots.\n3. 
**Refine your prompts:** Learn how to craft effective prompts to get the most valuable responses from GitLab Duo.\n4. **Share your experiences:** Discuss your use of AI tools with your team and contribute to best practices.\n\nWith the right approach, these tools can help us focus more on strategic thinking and less on routine tasks, ultimately leading to better products and happier customers.\n\n> [Try GitLab Duo today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n","ai-ml",[766,767,9],"AI/ML","product",{"slug":769,"featured":90,"template":699},"5-ways-gitlab-duo-chat-ai-can-supercharge-product-management","content:en-us:blog:5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","5 Ways Gitlab Duo Chat Ai Can Supercharge Product Management","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management.yml","en-us/blog/5-ways-gitlab-duo-chat-ai-can-supercharge-product-management",{"_path":775,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":776,"content":782,"config":792,"_id":794,"_type":13,"title":795,"_source":15,"_file":796,"_stem":797,"_extension":18},"/en-us/blog/a-deep-dive-into-the-security-analyst-persona",{"title":777,"description":778,"ogTitle":777,"ogDescription":778,"noIndex":6,"ogImage":779,"ogUrl":780,"ogSiteName":685,"ogType":686,"canonicalUrls":780,"schema":781},"A deep dive into the Security Analyst persona","See how we created our new Security Analyst persona, and how we are already putting it to use.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/a-deep-dive-into-the-security-analyst-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A deep dive into the Security Analyst persona\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andy Volpe\"}],\n  
      \"datePublished\": \"2019-02-12\",\n      }",{"title":777,"description":778,"authors":783,"heroImage":779,"date":785,"body":786,"category":787,"tags":788},[784],"Andy Volpe","2019-02-12","\nAs GitLab grows, so does our need for new, more area-specific personas. Recently, as part of our [effort to create personas](/blog/personas-and-empathy-building/), I was given a chance to craft one. As the UX designer for [the Secure team](/handbook/engineering/development/sec/secure/) here at GitLab, I jumped at the opportunity to learn more about security professionals, and how we may create products and features to meet their needs. Throughout the entire process, I gained a greater sense of empathy and a deeper understanding of the needs, goals, and pain points of security professionals. The result was our new [Security Analyst Persona, Sam](https://handbook.gitlab.com/handbook/product/personas/#sam-security-analyst). However, I will add a caveat that this is not the end of the process, but the beginning of how we can better support security professionals with new features and functionality that address their specific needs. You can peruse the highlights and the persona itself below, and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\n## The research\n\nHere are some takeaways from the [10 interviews](https://gitlab.com/gitlab-org/ux-research/issues/97) I conducted to create the Security Analyst persona.\n\nWe’ve learned that the Security Analyst is a bit of a generalist when it comes to their day-to-day tasks. From the research, I found that there isn’t one specific task that defines their day, but a grouping of tasks under the umbrella of security. 
I’ve written the summary of the persona to reflect the somewhat general nature of the Security Analysts' role:\n\n>\"I wear lots of hats, but the majority of my time is spent monitoring and flagging events, running down high-priority tasks and working with other teams to implement new systems.\"\n\n### What motivates a Security Analyst?\n\nSecurity Analysts strive for order in the chaos and, based on our research, are taking steps to achieve that order. One specific example:\n\n>When I’m monitoring my dashboards, I want to see everything I am monitoring in one tool, so I can do my job easier and more efficiently.\n\nMoving between different tools and dashboards was identified as a significant problem area for Security Analysts. They found it hard to create a workflow that was conducive to remediating security issues while having to work across multiple tools.\n\nAnother motivation I found during the research was that Security Analysts desire to be more proactive than reactive in their work. I’ve summarized this by adding the objective below:\n\n>When security testing, I want to be more proactive than reactive, so I can anticipate potential threats or vulnerabilities before the bad guys do.\n\nBy being more proactive or shifting left in their work, Security Analysts are able to identify and remediate potential vulnerabilities before they become a problem or even lead to an attack.\n\n### What are some of the frustrations Security Analysts have?\n\n>I’m frustrated I don’t have the resources to complete this project to its specifications.\n\nand\n\n>I’m frustrated when I know how to fix a security issue but the red tape at my company doesn’t allow me to in a timely manner.\n\nA common theme seen throughout the research was that of constrained resources and time. Often we found that security teams were small in comparison to other teams within their organization. 
This resource discrepancy leads to work being done at such a pace that the project can’t be completed to its specifications or in a timely manner.\n\n### How are we using the security Analyst persona at GitLab?\n\nWe are all-in on making the Security Persona a first-class persona here at GitLab. Recently we launched the [Group-level Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), which allows security professionals to monitor all their projects, in one view, for vulnerabilities, and gives them the ability to take action on those vulnerabilities right from the dashboard itself.\n\nAside from security dashboards, we are constantly dreaming up more security features and enhancements that will help users keep their instances, groups, and projects secure. You can [see our roadmap here](/direction/#future-releases) for more on what's coming.\n\n## The persona\n\n![Sam, Security Analyst persona](https://about.gitlab.com/images/blogimages/security-analyst-persona.png){: .shadow.center}\n\nKeep an eye out for the rest of our series on the [new personas](https://handbook.gitlab.com/handbook/product/personas/)!\n\n[Photo](https://unsplash.com/photos/z55CR_d0ayg) by [Andrew Neel](https://unsplash.com/@andrewtneel) on Unsplash\n{: .note}\n","security",[789,790,787,791,9],"testing","inside GitLab","UX",{"slug":793,"featured":6,"template":699},"a-deep-dive-into-the-security-analyst-persona","content:en-us:blog:a-deep-dive-into-the-security-analyst-persona.yml","A Deep Dive Into The Security Analyst 
Persona","en-us/blog/a-deep-dive-into-the-security-analyst-persona.yml","en-us/blog/a-deep-dive-into-the-security-analyst-persona",{"_path":799,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":800,"content":806,"config":813,"_id":815,"_type":13,"title":816,"_source":15,"_file":817,"_stem":818,"_extension":18},"/en-us/blog/adopt-agile-and-devops-for-ibm-z",{"title":801,"description":802,"ogTitle":801,"ogDescription":802,"noIndex":6,"ogImage":803,"ogUrl":804,"ogSiteName":685,"ogType":686,"canonicalUrls":804,"schema":805},"The benefits of DevOps practices for IBM Z","GitLab aims to provide a unified enterprise-wide DevOps platform with enhanced support for IBM Z. Here are three areas that can start to align DevOps and mainframe development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/adopt-agile-and-devops-for-ibm-z","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The benefits of DevOps practices for IBM Z\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2021-09-10\",\n      }",{"title":801,"description":802,"authors":807,"heroImage":803,"date":809,"body":810,"category":811,"tags":812},[808],"Vick Kelkar","2021-09-10","\n\nAs more organizations adopt open source software, [DevOps](/topics/devops/) and cloud computing, teams are moving towards a hybrid approach to application development and deployments. This is a particular challenge for siloed mainframe development teams that typically work within their own development tools – and those tools are not the same ones used by their distributed developer counterparts. These disparate toolsets are not integrated between build, package, test, and deploy steps. 
GitLab aims to provide a [unified enterprise-wide DevOps platform](/solutions/devops-platform/) with enhanced support for IBM z/OS® applications. Here are three critical areas to focus on that can help bridge the gap between mainframe and \"modern\" DevOps development.\n\n## Common workflows for hybrid environments\n\nDevOps is naturally aligned to Agile processes with its focus on continuous improvements, frequent releases and adherence to business objectives. As organizations adopt a hybrid infrastructure to meet their needs, it becomes extremely important to have a common workflow (and tools!) for continuous software development. The development workflow should be the same whether you are building a [cloud native](/topics/cloud-native/) container application for Red Hat OpenShift or whether you are refactoring COBOL applications on IBM Z. Ideally, in both scenarios, the same source code management, along with Git commit and merge-request workflows should be used, regardless of the type of application code or where it is deployed. The advantage of using the same software development lifecycle methodology and tools allows an organization to drive an enterprise-wide DevOps strategy.  Additionally, this helps organizations to attract mainframe developers, allowing them to move between mainframe and cloud-based application development projects,  enhancing developer job satisfaction, growth and retention.\n\n## Security and compliance vs speed\n\nIn an enterprise-wide mission critical application where security and compliance are paramount, a deployment cadence of once a quarter is considered satisfactory, but in a consumer-facing containerized application (using [microservices](https://about.gitlab.com/topics/microservices/)), multiple deployments can be easily achieved in a single day. Release speeds can differ but software security isn't negotiable. 
Every organization needs to ensure security by making sure best practices are followed for all application development processes. Ultimately, the ideal situation for every organization would be shortened development cycle times, while at the same time enabling developers to identify any security issues ahead of time. Rather than integrating security tools into the complex DevOps toolchain, a single application for the entire software development lifecycle can improve your security risk posture, simplify compliance/audit, and accelerate software development.  With [DevSecOps methodologies](/solutions/security-compliance/), security testing becomes an integral part of the software development lifecycle (SDLC) eliminating friction between siloed development and security teams, and dramatically improving code quality and deployment cycle times.\n\n## Increase collaboration and automation\n\nDevOps practices and elastic cloud computing resources helpdevelopers become more self-sufficient as they deploy their applications at scale. They can request identical compute resources on every new release of their application. Automating the deployment steps through a CI/CD pipeline makes rolling out the new version of an application more efficient and less prone to errors. While [CI/CD tooling](/topics/ci-cd/) increases automation for software inspection, development and delivery, resilient systems like IBM Z or RedHat OpenShift Container Platform have their own built-in automation to address application testing, reliability and availability.\n\nMost organizations have a security team that looks at the risk posture of an application. This can range from high availability of applications for business continuity reasons, to a vulnerability in a software library.  In most cases, the security team is much smaller than the application development teams and uses different tools to analyze risks. 
This creates an impediment for organizations to scale and applications to scale in production. Using the same workflow and shared data model between security and development teams allows knowledge sharing and helps break down team silos. For example, a Red Hat OpenShift application developer can create a security Issue in their GitLab Ultimate DevOps Platform, and the security team can comment on that same issue and analyze the risk.\n\nCloud computing and its elasticity offers advantages for certain applications and use cases while on-premise systems like IBM Z offer advantages for high transaction applications and use cases. Red Hat Openshift offers a cloud native approach to application deployments. GitLab Ultimate integrates with container platforms as well as IBM Z environments. This gives customers the flexibility to deploy their application in their desired environment while helping increase automation and Agile practices in their organization.\n\nTo learn more\n\n* Visit[ GitLab Ultimate for IBM z/OS](https://www.ibm.com/products/gitlab-ultimate/zos)\n* Hear GitLab and IBM experts discuss the benefits of integrating GitLab into your DevOps solution -[Automate your Z DevOps CI/CD pipeline with GitLab](https://mediacenter.ibm.com/id/1_djnxx05v)\n* Learn about the management of mainframe apps lifecycle with[IBM Z DevOps and GitLab](https://mediacenter.ibm.com/id/1_oxj8eseu).\n* Take advantage of the[DevOps Acceleration Program](https://ibm.github.io/mainframe-downloads/DevOps_Acceleration_Program/devops-acceleration-program.html) to partner with IBM for a successful transformation\n","culture",[744,696,9],{"slug":814,"featured":6,"template":699},"adopt-agile-and-devops-for-ibm-z","content:en-us:blog:adopt-agile-and-devops-for-ibm-z.yml","Adopt Agile And Devops For Ibm 
Z","en-us/blog/adopt-agile-and-devops-for-ibm-z.yml","en-us/blog/adopt-agile-and-devops-for-ibm-z",{"_path":820,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":821,"content":827,"config":836,"_id":838,"_type":13,"title":839,"_source":15,"_file":840,"_stem":841,"_extension":18},"/en-us/blog/advanced-search-data-migrations",{"title":822,"description":823,"ogTitle":822,"ogDescription":823,"noIndex":6,"ogImage":824,"ogUrl":825,"ogSiteName":685,"ogType":686,"canonicalUrls":825,"schema":826},"GitLab's data migration process for Advanced Search","We needed a more streamlined data migration process for Advanced search.\nHere's what we did.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682017/Blog/Hero%20Images/advanced-search-migrations.jpg","https://about.gitlab.com/blog/advanced-search-data-migrations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's data migration process for Advanced Search\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dmitry Gruzd\"}],\n        \"datePublished\": \"2021-06-01\",\n      }",{"title":822,"description":823,"authors":828,"heroImage":824,"date":830,"body":831,"category":832,"tags":833},[829],"Dmitry Gruzd","2021-06-01","For some time now, GitLab has been working on enabling the Elasticsearch\n\nintegration on GitLab.com to allow as many GitLab.com users as possible\naccess\n\nto the [Advanced Global\nSearch](https://docs.gitlab.com/ee/user/search/advanced_search.html)\n\nfeatures. 
Last year, after enabling Advanced Search for all licensed\ncustomers on\n\nGitLab.com we were thinking how to simplify the rollout of some Advanced\nSearch\n\nfeatures that require changing the data in Elasticsearch.\n\n\n(If you're interested in the lessons we learned on our road to Enabling\n\nElasticsearch for GitLab.com, you can read [all about\nit](/blog/elasticsearch-update/).\n\n\n## The data migration process problem \n\n\nSometimes we need to change mappings of an index or backfill a field, and\n\nreindexing everything from scratch or using [Zero downtime\nreindexing](https://docs.gitlab.com/ee/integration/elasticsearch.html#zero-downtime-reindexing)\n\nmight seem like an obvious solution. However, this is not a scalable option\nfor\n\nbig GitLab instances. GitLab.com is the largest known installation of GitLab\nand\n\nas such has a lot of projects, code, issues, merge requests and other things\nthat\n\nneed to be indexed. For example, at the moment our Elasticsearch cluster has\n\nalmost 1 billion documents in it. It would take many weeks or even months to\n\nreindex everything and for all that time indexing would need to remain\npaused, therefore\n\nsearch results would quickly become outdated.\n\n\n## Original plan for multi-version support\n\n\nOriginally, we were planning to introduce multi-version support using an\napproach\n\nthat is fully reliant on GitLab to manage both indices, reading from the old\none\n\nand writing to both until the migration is finished. You can read more\ninformation at\n\n[!18254](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/18254) and\n\n[&1769](https://gitlab.com/groups/gitlab-org/-/epics/1769). As of writing\nthis,\n\nmost of the code for this approach still exists in GitLab in a\nhalf-implemented form.\n\n\nThere were 2 primary concerns with this approach:\n\n\n1. Reindexing would require the GitLab application to read every single\ndocument\n\nfrom the storage and send it to Elasticsearch again. 
Doing so\n\nwould put a big strain on different parts of the application, such as\ndatabase,\n\nGitaly, and Sidekiq.\n\n1. Reindexing everything from GitLab to the cluster again may be very\nwasteful on\n\noccasions where you only need to change a small part of the index. For\nexample, if\n\nwe want to add epics to the index, it is very wasteful to reindex every\ndocument\n\nin the index when we could very quickly just index all the epics. There are\nmany\n\nsituations where we will be trying to perform some migration that can be\ndone more\n\nefficiently using a targeted approach (e.g. adding a new field to a document\ntype\n\nonly requires reindexing all the documents that actually have that field).\n\n\nFor these reasons we've decided to create a different data migration\nprocess.\n\n\n## Our revised data migration process\n\n\nWe took inspiration from the [Rails DB\nmigrations](https://guides.rubyonrails.org/active_record_migrations.html).\n\nWe wanted to apply the best practices from it without having to re-architect\nwhat\n\nthe Rails team has already implemented.\n\n\nFor example, we've decided that we would have a special directory with\ntime-stamped\n\nmigration files. We wanted to achieve a strict execution order so that many\n\nmigrations might be shipped simultaneously. A special background processing\nworker\n\nwill be checking this folder on schedule. This is slightly different to\nrails background migrations where the operator is required to manually run\nthe migration. We decided to make it fully automated and run it in the\nbackground to avoid the need for self-managed customers to add extra steps\nto the migration process. This would have likely made it much more difficult\nfor everyone involved as there are many ways to run GitLab. 
This extra\nconstraint also forces us to always think of migrations as possibly\nincomplete at any point in the code which is essential for zero-downtime.\n\n\nAt first, we wanted to store the migration state in the Postgresql database,\nbut\n\ndecided against it since this may not be perfect for the situation where a\nuser\n\nwants to connect a new Elasticsearch cluster to GitLab. It's better to store\nthe\n\nmigrations themselves in the Elasticsearch cluster itself so they're more\nlikely to be in\n\nsync with the data.\n\n\nYou can see your new migration index in your Elasticsearch cluster. It's\ncalled\n\n`gitlab-production-migrations`. GitLab stores a few fields there. We use the\n\nversion number as the document id. This is an example document:\n\n\n```\n\n{\n    \"_id\": \"20210510143200\",\n    \"_source\": {\n        \"completed\": true,\n        \"state\": {\n        },\n        \"started_at\": \"2021-05-12T07:19:08.884Z\",\n        \"completed_at\": \"2021-05-12T07:19:08.884Z\"\n    }\n}\n\n```\n\n\nThe state field is used to store data that's required to run batched\nmigrations.\n\nFor example, for batched migrations we store a slice number and a task id\nfor\n\ncurrent Elasticsearch reindex operation and we update the state after every\nrun.\n\n\nThis is how an example migration looks:\n\n\n```ruby\n\nclass MigrationName \u003C Elastic::Migration\n  def migrate\n    # Migrate the data here\n  end\n\n  def completed?\n    # Return true if completed, otherwise return false\n  end\nend\n\n```\n\n\nThis looks a lot like [Rails DB\nmigrations](https://guides.rubyonrails.org/active_record_migrations.html),\n\nwhich was our goal from the beginning. The main difference is that it has an\nadditional method to\n\ncheck if a migration is completed. 
We've added that method because we need\nto\n\nexecute asynchronous tasks quite often and we want to check if it's\ncompleted\n\nlater in a different worker process.\n\n\n## Migration framework logic\n\n\nThis is a simple flow chart to demonstrate the high level logic of the new\nmigration framework.\n\n\n```mermaid\n\ngraph TD\n    CRON(cron every 30 minutes) --> |executes| WORKER[MigrationWorker]\n    WORKER --> B(an uncompleted migration is found)\n    B --> HALT(it's halted)\n    B --> UN(it's uncompleted)\n    B --> COMP(it's finished)\n    HALT --> WARN(show warning in the admin UI)\n    WARN --> EX(exit)\n    UN --> PREF(migration preflight checks)\n    PREF --> RUN(execute the migration code)\n    COMP --> MARK(mark it as finished)\n    MARK --> EX\n```\n\n\nAs you can see above, there are multiple different states of a migration.\nFor example,\n\nthe framework allows it to be halted when it has too many failed attempts.\nIn\n\nthat case, the warning will be shown in the admin UI with a button for\nrestarting\n\nthe migration.\n\n\n![How the warning looks\nlike](https://about.gitlab.com/images/blogimages/advanced_search/halted_warning.png)\n\n\n## Configuration options\n\n\nWe've introduced many useful configuration options into the framework, such\nas:\n\n\n- `batched!` - Allows the migration to run in batches. If set, the worker\nwill\n\nre-enqueue itself with a delay which is set using the `throttle_delay`\noption\n\ndescribed below. We use this option to reduce the load and ensure that the\n\nmigration won't time out.\n\n\n- `throttle_delay` - Sets the wait time in between batch runs. This time\nshould be\n\nset high enough to allow each migration batch enough time to finish.\n\n\n- `pause_indexing!` - Pauses indexing while the migration runs. This setting\nwill\n\nrecord the indexing setting before the migration runs and set it back to\nthat\n\nvalue when the migration is completed. 
GitLab only uses this option when\n\nabsolutely necessary since we attempt to minimize the downtime as much as\npossible.\n\n\n- `space_requirements!` - Verifies that enough free space is available in\nthe\n\ncluster when the migration is running. This setting will halt the migration\nif the\n\nstorage required is not available. This option is used to\n\nprevent situations when your cluster runs out of space when attempting to\nexecute\n\na migration.\n\n\nYou can see the up-to-date list of options in this development\n[documentation\nsection](https://docs.gitlab.com/ee/development/elasticsearch.html#migration-options-supported-by-the-elasticmigrationworker).\n\n\n## Data migration process results\n\n\nWe implemented the Advanced Search migration framework in the 13.6 release\nand\n\nhave been improving it since. You can see some details in the original issue\n\n[#234046](https://gitlab.com/gitlab-org/gitlab/-/issues/234046). The only\n\nrequirement for this new feature is that you should create your index using\nat\n\nleast version 13.0. We have that requirement since we're heavily utilizing\n\naliases, which were introduced in 13.0. As you might know, over the last few\n\nreleases we've been working on separating different document types into\ntheir own\n\nindices. This migration framework has been a tremendous help for our\ninitiative.\n\nWe've already completed the migration of issues (in 13.8), comments (in\n13.11),\n\nand merge requests (in 13.12) with a noticeable performance improvement.\n\n\nSince we've accumulated so many different migrations over the last few\nreleases\n\nand they require us to support multiple code paths for a long period of\ntime,\n\nwe've decided to remove older migrations that were added prior to the 13.12\n\nrelease. 
You can see some details in this\n[issue](https://gitlab.com/gitlab-org/gitlab/-/issues/329952).\n\nWe plan to continue the same strategy in the future, which is one of the\nreasons\n\nwhy you should always upgrade to the latest minor version before migrating\nto a\n\nmajor release.\n\n\nIf you're interested in contributing to features that require Advanced\nSearch\n\nmigrations, we have a dedicated [documentation\nsection](https://docs.gitlab.com/ee/development/elasticsearch.html#creating-a-new-advanced-search-migration)\n\nthat explains how to create one and lists all available options for it.","engineering",[834,835,9],"features","releases",{"slug":837,"featured":6,"template":699},"advanced-search-data-migrations","content:en-us:blog:advanced-search-data-migrations.yml","Advanced Search Data Migrations","en-us/blog/advanced-search-data-migrations.yml","en-us/blog/advanced-search-data-migrations",{"_path":843,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":844,"content":850,"config":856,"_id":858,"_type":13,"title":859,"_source":15,"_file":860,"_stem":861,"_extension":18},"/en-us/blog/agile-best-practices",{"title":845,"description":846,"ogTitle":845,"ogDescription":846,"noIndex":6,"ogImage":847,"ogUrl":848,"ogSiteName":685,"ogType":686,"canonicalUrls":848,"schema":849},"5 Agile best practices","Make the most out of Agile development with these technical best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678597/Blog/Hero%20Images/run-agile-in-gitlab.jpg","https://about.gitlab.com/blog/agile-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Agile best practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-08-13\",\n      }",{"title":845,"description":846,"authors":851,"heroImage":847,"date":853,"body":854,"category":718,"tags":855},[852],"Suri 
Patel","2019-08-13","\n\n[Agile development](/solutions/agile-delivery/) can have\na transformative impact on teams and applications. These five best practices can\nhelp your team streamline and accelerate delivery.\n\n## 1. Continuous integration\n\n[Continuous integration](/solutions/continuous-integration/) works by pushing small code chunks\nto an application’s codebase hosted in a Git repository. Every push triggers a pipeline of scripts to build,\ntest, and validate code changes before merging them into the main branch. By\nbuilding and testing each change as early as possible – usually several times a\nday – teams can detect errors as quickly as possible, reduce integration problems,\nand avoid compounding problems, allowing teams to develop faster, with more confidence.\n\n## 2. Retrospectives\n\n[Retrospectives](/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives/) are conversations about what went well and what went wrong in a\nproject or iteration. One of the most important Agile qualities is continuous\nlearning, and retros provide a transparent way to discuss how various teams\nexperienced a sprint and voice any concerns or ideas.\n\n> “A successful team is a happy team. Bringing down cycle time can help a team be more\nsuccessful because they are shipping value more often, but your team might have more\nimportant things that must be addressed first. Using retrospectives will help you figure\nout what success means to your team, and what needs to be done to achieve\nthat success.” – [Rachel Nienaber](/company/team/#rnienaber), engineering manager, Geo\n\nTo generate the best results from a retrospective, there should be\n[a safe environment for feedback and discussion and a plan for advancing discussion\nfrom facts to\nconclusions](/handbook/engineering/management/group-retrospectives/).\n\n## 3. Pairing\n\nPairing sessions can help team members work through features both large and small,\ninspiring problem-solving and ideation. 
When pairing, one team member writes code\nwhile the other reviews each line. Pairing results in fewer bugs, increased innovation,\nand skills development. Team members can learn from each other and discover best\npractices. Team members can spontaneously pair or managers can set up a more\n[formal pairing session process](https://gitlab.com/gitlab-com/support/support-training/issues?label_name%5B%5D=pairing) 🍐\n\n## 4. Iterative development\n\nWhen teams iterate with small changes, they can\n[reduce cycle time](/blog/strategies-to-reduce-cycle-times/) and spark rapid feedback cycles.\nBy making the quickest changes possible to improve a user's outcome, teams can add\nuseful functionality with fewer bugs or usability issues since potential problems\nare spotted early. Other benefits of iterative development include faster time to\nmarket, reduced scope creep, and increased morale (i.e. team members can see their\nwork right away rather than wait several releases).\n\n## 5. Burndown charts\n\nIf your team uses a Scrum framework, consider using [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to monitor\nsprint progress. Teams can visualize the work scoped in the current sprint to\nunderstand what work has been completed, allowing them to react to risks quickly\nand adapt. This information can help business stakeholders understand that anticipated\nfeatures may be delayed until a future sprint.\n\nEmploying Agile best practices will have a significant positive impact on efficiently\ncreating customer-centric products.\n\nDo you have any best practices that have transformed your team’s development process? 
We’d love to hear them!\n\nCover image by [Mikael Kristenson](https://unsplash.com/@mikael_k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/6GjHwABuci4)\n{: .note}\n",[744,696,722,9],{"slug":857,"featured":6,"template":699},"agile-best-practices","content:en-us:blog:agile-best-practices.yml","Agile Best Practices","en-us/blog/agile-best-practices.yml","en-us/blog/agile-best-practices",{"_path":863,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":864,"content":870,"config":875,"_id":877,"_type":13,"title":878,"_source":15,"_file":879,"_stem":880,"_extension":18},"/en-us/blog/agile-for-developers-refactor-code",{"title":865,"description":866,"ogTitle":865,"ogDescription":866,"noIndex":6,"ogImage":867,"ogUrl":868,"ogSiteName":685,"ogType":686,"canonicalUrls":868,"schema":869},"Agile for developers: Refactoring code","The time commitment involved in refactoring may cause hesitation, but the impact on developer productivity and efficiency outweighs the initial discomfort.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680945/Blog/Hero%20Images/refactorpost.jpg","https://about.gitlab.com/blog/agile-for-developers-refactor-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agile for developers: Refactoring code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-12-18\",\n      }",{"title":865,"description":866,"authors":871,"heroImage":867,"date":872,"body":873,"category":300,"tags":874},[852],"2019-12-18","\n\nIt’s difficult to cook in a cluttered, untidy kitchen. You need a specific knife, but it needs to be washed. You need that one mixing bowl, but it’s not in the cabinet where you usually keep it. You can’t find a place for the cutting board, because the countertop has no room. 
Software development is similar to cooking - you need a tidy codebase to deliver rapidly. If you don’t clean your code as you develop, you can find yourself surrounded by a mess. Fortunately, refactoring code helps you keep your [source code](/solutions/source-code-management/) neat and tidy.\n\n## Refactor code to accelerate Agile delivery\n\nRefactoring simplifies the design of code, without altering its behavior, to increase maintainability and readability. Teams refactor code before adding updates or releasing features in an effort to improve code quality so that future iterations built on original code are not bogged down by confusion and clutter. As such, this process helps to [accelerate Agile delivery](/solutions/agile-delivery/).\n\n>  “None of my inventions came by accident. I see a worthwhile need to be met and I make trial after trial until it comes.” — Thomas Edison\n\nRefactoring isn’t a random exercise in which developers tinker with code. It’s a precise process designed to enhance the internal structure of a program’s source code. While it may seem like a tedious task, refactoring has long-term business value.  \n\n## How to get started with refactoring\n\nStaring at unrefactored code trying to determine where to start can be a frightening experience. Fortunately, there are a few methods you can use to make refactoring a little easier. \n\n### Incremental refactoring\n\nThe simplest way to get started is to make small improvements. Make a list of the parts of your system that change most often and refactor those areas. Making incremental improvements on the files that your team works with most often can help you steadily work through your code. By targeting the areas that are used most often, refactoring can have a significant impact on your overall system.\n\n### Test-driven development\n\nYou can think of test-driven development as cleaning as you’re coding. 
If you’d like to revolutionize the way your team develops and make refactoring an integrated aspect of your workflow, you can embrace test-driven development, which incorporates coding, unit tests, and refactoring to program in short development cycles. Developers write a failing automated test to define a new function before writing the smallest amount of code to pass the test. The code is then refactored to an ideal state.\n\n## The benefits of refactoring code\n\nRefactoring prevents code rot, such as bad dependencies between classes, myriad patches, incorrect allocation of class responsibilities, and duplicate code, resulting in a more efficient code base. The time taken to refactor pays dividends, since it’s easier to clean code closer to when it was written rather than rush to fix problems later.\n\nThe time commitment involved in refactoring may cause hesitation, but the impact on developer productivity and efficiency outweighs the initial discomfort. When developers take the time to refactor, they continually maintain a tidy source code so that other developers can easily deliver without running into problems. Refactoring helps create a culture of shared responsibility, trust, and collaboration.\n\nWith refactoring, the QA and debugging stages are simpler, since there is more cohesion to the overall code. Furthermore, software assets can be extended for years, allowing users to experience prolonged value rather than dealing with an unusable system. \n\n## What’s next for your team?\n\nAgile techniques have the power to transform your team’s culture, sparking seamless delivery, innovation, and collaboration. Depending on your team’s challenges, there’s a technique to help your team through it. With [pairing sessions](/blog/agile-pairing-sessions/), your developers can bridge knowledge gaps, increase communication, and develop solutions to challenging problems. 
By [strengthening group development](/blog/how-to-strengthen-agile-teams-with-tuckmans-model/), you can help your team rebuild after breaking down silos. When your team [embraces an Agile mindset](/blog/agile-mindset/), they’re more flexible and can easily adapt to changes. Refactoring is one step in a journey to help your team cultivate a strong Agile culture. \n\nCover image by [Barn Images](https://unsplash.com/@barnimages?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/t5YUoHW6zRo)\n{: .note}\n",[744,696,9],{"slug":876,"featured":6,"template":699},"agile-for-developers-refactor-code","content:en-us:blog:agile-for-developers-refactor-code.yml","Agile For Developers Refactor Code","en-us/blog/agile-for-developers-refactor-code.yml","en-us/blog/agile-for-developers-refactor-code",{"_path":882,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":883,"content":889,"config":894,"_id":896,"_type":13,"title":897,"_source":15,"_file":898,"_stem":899,"_extension":18},"/en-us/blog/agile-mindset",{"title":884,"description":885,"ogTitle":884,"ogDescription":885,"noIndex":6,"ogImage":886,"ogUrl":887,"ogSiteName":685,"ogType":686,"canonicalUrls":887,"schema":888},"What is an Agile mindset?","Learn how embracing change can help you speed up software delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680634/Blog/Hero%20Images/agilemind.jpg","https://about.gitlab.com/blog/agile-mindset","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What is an Agile mindset?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-06-13\",\n      }",{"title":884,"description":885,"authors":890,"heroImage":886,"date":891,"body":892,"category":718,"tags":893},[852],"2019-06-13","\n\n\nEnsuring [Agile](/solutions/agile-delivery/) teams use the most [effective 
strategies](/solutions/agile-delivery/) to reduce cycle time is a\npriority for IT leaders, but what good is a menagerie of techniques if a team’s\napproach to software development doesn’t spark innovation? When it comes to\nbuilding the foundation for accelerating delivery, IT leaders have been incorrectly\nplacing emphasis on collecting tools rather than developing an Agile mindset.\n\n> “The core of Agile is recognizing that we need to get to and maintain an Agile mindset. **If I have an organization with an Agile mindset, and really rock-solid product management, Agile processes and tools will evolve out of that. If you have the Agile mindset and an awesome connection with your customers and are solving their problems, things will evolve in the right way.** You won’t even realize you’re being Agile. It’s just good business.” — [Todd Little](https://www.forbes.com/sites/stevedenning/2016/06/07/the-key-missing-ingredient-in-the-agile-manifesto-mindset/#4fa5917467ff), CEO Lean Kanban\n\nThere are many definitions of an Agile mindset, but the general consensus is that it:\n\n* Views setbacks as learning opportunities\n* Embraces iteration, collaboration, and change\n* Focuses on delivering value\n\n## Agile mindset characteristics\n\nThere’s no definitive list of what makes up an Agile mindset, but with the\nintention of getting you started, here are a few of the most widely accepted\ncharacteristics. Based on your team’s dynamics, your organization’s culture, and\nyour goals, you may adopt other attributes to help your team accelerate delivery.\n\n### Setbacks are learning opportunities\n\nEmpower your team to experiment and be creative so that rather than view a setback\nas a failure, they’ll see it as an opportunity to learn and grow. 
When your team\nhas the freedom to be innovative – without fear – they’re more likely to solve\nproblems and add to the knowledge base of what works and what doesn’t.\nTaking risks shouldn’t be a rebellious endeavor — it should be your team’s norm.\n\n### Agile values and principles: Iteration, collaboration, and change\n\n**Iteration**: Instill the belief that there’s always room for improvement and\nthat anyone can propose a change or idea. At GitLab, we believe\n[everyone can contribute](/company/mission/#mission) and that [iteration is the fastest\nway to feedback](https://handbook.gitlab.com/handbook/values/#iteration), helping us course correct and\ncreate new features.\n\n**Collaboration**: Finding ways to improve and increase cross-collaboration\nenables frictionless handoffs, helps relieve the burden on teams, and facilitates\na culture of trust and communication. Whether you develop new workflows or use\ndifferent tools, keep an eye out for silos which can work against collaboration.\n\n**Change**: Agile methodology is founded on the ability to adapt to\nunpredictability. If your customers or organization want to pivot soon after a\ndirection is set, your team should be able to do just that. Any\nprocesses or roadblocks that prevent your team’s ability to be flexible and\nembrace change should be removed.\n\n### Deliver value\n\nWe can all agree that teams should deliver value both to customers and the\norganization. But where an Agile mindset makes all the difference is shifting the\nemphasis from the output, which focuses only on the items delivered, to the\noutcome, which is how a feature meets a market need. An Agile mindset helps teams\ncreatively think of how a feature can solve a problem rather than feel pressured\nto deliver a set number of items in a month. It’s the whole “quality over quantity” idea.\n\n## Steps to shift to an Agile mindset\n\nChanging your team’s perspective and the way they approach problems is a difficult\nundertaking. 
You’re challenging their long-held beliefs while requiring them to\ncomplete tasks and meet deadlines. This is an uncomfortable process in any\nenvironment, but especially in the workplace where an (in)ability to quickly\nshift can impact performance and reputation. Fortunately, there are a few\nmethods to help you navigate these difficulties and enable your team to smoothly\nadopt an Agile mindset:\n\n\n1. **Model behavior**: The most effective way to help your team shift to an Agile\nmindset is to exemplify the behaviors you want to see. To create a\n“no-fault, embrace risk” environment, share your setbacks with the team and tell\nthem what you learned. When someone experiments, praise them for trying something\nnew and discuss the biggest lessons learned. By being transparent and showing your\nteam that this new way of thinking is possible, you become their collaborator.\n1. **Storytelling**: Share how other organizations or teams have benefited from\nan Agile mindset. Understanding what others gained from a new way of\nthinking can help your team feel more enthusiastic about the change.\n1. **Take small steps**: After doing more research about an Agile mindset, you\nmight get excited and feel tempted to change things overnight. Take small steps\nand make minor adjustments in the beginning to help your team acclimate.\n\n## What's the impact?\n\nWith an Agile mindset, teams can quickly adjust to changing market needs, respond\nto customer feedback, and deliver business value. 
Adopting a new perspective can\npositively change a team’s culture, since the shift permits innovation without fear,\ncollaboration with ease, and delivery without roadblocks.\n\nCover image by [Benjamin Voros](https://unsplash.com/@vorosbenisop?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/X4bgpcGBNAY)\n{: .note}\n",[744,696,722,9],{"slug":895,"featured":6,"template":699},"agile-mindset","content:en-us:blog:agile-mindset.yml","Agile Mindset","en-us/blog/agile-mindset.yml","en-us/blog/agile-mindset",{"_path":901,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":902,"content":908,"config":913,"_id":915,"_type":13,"title":916,"_source":15,"_file":917,"_stem":918,"_extension":18},"/en-us/blog/agile-pairing-sessions",{"title":903,"description":904,"ogTitle":903,"ogDescription":904,"noIndex":6,"ogImage":905,"ogUrl":906,"ogSiteName":685,"ogType":686,"canonicalUrls":906,"schema":907},"Improving pair programming with pairing sessions","Pairing with a teammate can increase delivery. Here we look at what pairing sessions are, what they involve and what they're good for.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665897/Blog/Hero%20Images/incrementalcodedevelopment.jpg","https://about.gitlab.com/blog/agile-pairing-sessions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improving pair programming with pairing sessions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-08-20\",\n      }",{"title":903,"description":904,"authors":909,"heroImage":905,"date":910,"body":911,"category":718,"tags":912},[852],"2019-08-20","\nArya and Sansa. Han and Chewbacca. Harry and Ron. 
When people team up, great things can happen.\n\n## What is pair programming?\n\nPair programming, an Agile approach to software development, involves two programmers working together at the same workstation. One programmer (called the driver) writes code while the other programmer (called the navigator) reviews code in real time. Pairing sessions can accelerate [Agile delivery](/solutions/agile-delivery/), because teammates work together to find the best solutions to several challenges. \n\nRather than working in silos, team members work together to share knowledge and quickly move through obstacles. Sounds good, right? Well, some organizations view pair programming as an inefficient use of time. After all, why should two developers work on the same piece of code when there’s a mountain of technical debt, an impending release, and lingering OKRs around the corner?\n\n## How to get started with pair programming\n\nThe key to any successful paired programming partnership is open communication and creating a plan together so you can avoid bottlenecks during the project process. \n\nHere are a few things you need to consider as a team before beginning any coding work:\n\n* Have a mutual understanding of what “ready” looks like for this project. Consult each other as well as any stakeholder involved, like a product owner, so that everyone is clear on when to give the projects a final green light. \n* Create a step-by-step project plan. Consider how you will trade off coding and reviewing responsibilities, how you want to handle testing, and any other external help you may need to complete the project. \n* Brainstorm as many potential roadblocks as you can think of in this planning process, and try to come up with potential solutions. You can brainstorm together on paper, talk it out, or go off separately and then share thoughts, but this is an important step. Always be prepared!\n* Agree on the technology you want to use. 
From computers and keyboards to reliable wifi or a whiteboard, make sure you have all of the tools you need.\n\n## Some pair programming best practices\n\nTo achieve the best outcome of your pair programming experience, we recommend you follow these best practices:\n\n* **ABC (Always be communicating).** Regardless of whether you’ve worked well together in the past or you’re a brand new partnership, the importance of communication can’t be overstated. Two individuals are likely to have different thoughts and opinions along the way. To keep the project (and yourselves) from suffering, establish open and frequent communication practices early.\n* **Take turns.** No single person has to be the only one navigating or driving, and you shouldn’t. Take turns in each role as often as you need to make sure your minds and eyes stay fresh and you keep producing quality work.\n* **Take a break.** Rome wasn’t built in a day, and neither was coding. You and your pair programming partner need to make sure to take breaks so as not to induce burnout. \n* **Get good technology tools.** And remember to click that video on. Oftentimes, pair programming is done remotely. It can help to have an actual facetime conversation, even if it’s virtual, to stay connected and communicative throughout the course of the partnership. \n* **Ask for help.** If there is a part of your project that both of you don’t understand, ask for clarification. Better to ask ahead of project completion than after. \n\n## The case for and against pair programming\n\nThere are benefits and drawbacks to pairing sessions, so a few GitLab team members\nshared their thoughts to help you determine whether pair programming is right for you.\n\n> “I've done pair programming in the past. I love it because it helps to bounce\nideas off people, and I find we often could solve ‘bigger’ problems faster. 
To me,\nthe downside is measuring/proving that this is a good method of programming since\nmany people see this as inefficient (two people working on the same problem).” –\n[Cynthia Ng](/company/team/#TheRealArty), senior support agent\n\nToday’s developer feels the pressure of delivering at rapid speeds. Sometimes, a\nchallenge is just too complex for one person to solve, and pairing sessions can\nhelp alleviate the difficulties experienced when racing towards a release while\ncarrying a burdensome issue. Talking through solutions and drawing on each other’s\nexperiences can help a pair work towards a new approach.\n\nMeasuring the effectiveness of pairing sessions might be difficult, but there are ways to\nevaluate success. Considering failures in functionality, the number of\nbugs, and improvements in productivity can help teams determine whether pairing\nmakes a difference with delivery.\n\n### The role of engagement and continuous learning in delivery\n\nIT leaders may be reluctant to embrace pairing, since two developers dedicate\ntheir time to a single problem, but it’s important to note researchers have\nfound that\n[90% of new skills learned are lost due to lack of\nengagement](https://www.wsj.com/articles/SB10001424052970204425904578072950518558328),\nand in an Agile framework, a culture of continuous learning helps improve all aspects of delivery.\n\n> “When I was a junior developer, I found it very helpful to talk through my\nthought process and hear how senior developers approached the same problem. 
But,\nas an introvert, I found it exhausting to do all day, every day.” –\n[Jennie Louie](/company/team/#jennielouie), test automation engineer, Enablement\n\nAgile models often include the value of continuous learning to help everyone –\nfrom C-level to junior level – develop new skills to remain adaptable and productive.\nPairing sessions provide a platform from which teammates can learn in tandem.\n\n> “I’ve never done ‘strict’ pairing with a driver/navigator, only the relaxed kind\nwhere you just chat and sometimes switch keyboards. And while I can't really imagine\npairing full-time, I guess with the right pair and some practice it could indeed be\na great experience.” – [Markus Koller](/company/team/#toupeira), backend engineer, Create:Editor\n\nThe drawbacks to pair programming might make you hesitate, but I encourage you to\ntake a chance on it, especially if you want to accelerate delivery. Here are a\nfew pros and cons of pairing to help you understand the process:\n\n### Advantages of pair programming\n\nDirectly collaborating with a teammate can increase morale and inject fun and\ndiversity in one’s day. By working alongside each other, teammates can learn\ndifferent coding practices, workflow techniques, and new ways of approaching\nproblems, which increases innovation and efficiency and decreases knowledge silos.\n\n> “Pair programming can be great for onboarding, mentoring, and [rubber ducking](https://en.wikipedia.org/wiki/Rubber_duck_debugging)\ndifficult problems, since teammates receive immediate\nfeedback.” – [Andrew Kelly](https://gitlab.com/ankelly), senior security engineer, [Application Security](/topics/devsecops/)\n\nJunior developers benefit when pair programming with senior developers, since they’ll\ngain strong industry knowledge. Meanwhile, senior developers get teaching experience\nand the ability to think critically about solutions.\n\n> “Programming is fairly abstract. 
When you have to explain a concept verbally, it\noften makes you realize you're missing pieces or that there are better\nways to solve problems than your initial idea.” – [Brandon Lyon](/company/team/#brandon_m_lyon), marketing web developer/designer\n\nRegardless of experience level, everyone can benefit from pairing sessions, since\nthere is no right answer in programming. I consider software development a multi-faceted\nendeavor in which imagination and creativity are driving forces. Based on knowledge,\nexperience, and learning styles, people approach some aspects of code with\na different understanding of how it ties into existing systems. When pairing, people can\ndiscuss these perspectives and assess which approach is best.\n\n### Disadvantages of pair programming\n\nPairing might sound like the solution to many of your delivery problems, but it’s\nnot all roses and rainbows.\n\nGiven the success of pairing, teammates might be tempted to join forces a little\ntoo often. Pair programming can feel inefficient if overdone or used for tasks\nsuch as boilerplate code, smaller and well-defined changes, and [yak shaving](https://www.techopedia.com/definition/15511/yak-shaving).\n\n> “Pair programming is not a silver bullet. Some software solutions just need a\nsingle person to hunker down and work it out before sharing with others.” – [Andrew Kelly](https://gitlab.com/ankelly)\n\nIf teams are just starting out with pairing, it can take practice and patience\nto be a “good pair,” which can be difficult even for experienced pair programmers.\nDo retros after a pairing session to understand what worked well, what didn’t work,\nand how you can improve future sessions.\n\n## See it in action\n\nNow that you know a bit more about pair programming, you might feel ready to take\nthe plunge. At GitLab, we 💖 pairing. 
Most pairing sessions occur when developers\nwork at the same station, but as an [all-remote company](/company/culture/all-remote/),\nwe’ve found ways to make it work.\n\n> “Remote pair programming can be tougher than in-person pairing. Distance plus the\ntooling isn’t always the best, but it’s not impossible.” – [Andrew Kelly](https://gitlab.com/ankelly)\n\nGitLab’s Support team created a [dedicated project and issue templates for pairing\nsessions](https://gitlab.com/gitlab-com/support/support-training/issues?label_name%5B%5D=pairing).\n\n> “In Support, we do pairing sessions (or group ‘crush sessions’) and find we often\nget through _more_ tickets when working together, so it's something we're tracking\nas a milestone for each quarter.” – [Cynthia Ng](/company/team/#TheRealArty)\n\nOver in engineering, the Frontend team has also been [experimenting with how to support\npair programming](https://gitlab.com/gitlab-org/frontend/general/issues/12). The\nteam has used VSCode live share a few times but enjoys open discussion and sending\npatches to each other.\n\n> “The best format so far is someone posts a \"🍐 request\" in the #frontend_pairs\nSlack channel – people show interest – a time is scheduled on the calendar – then\nwe do somewhat of a mob programming session.” – [Paul Slaughter](/company/team/#pslaughter), frontend engineer, Create:Editor\n\nEvery software team hears the importance of acceleration, and it can be a daunting\nthought, especially when faced with complex problems. The next time you find\nyourself dragging your fingers across the keyboard and dreading that next line of\ncode, consider pairing up with a teammate to tackle issues together.\n\n> “Pairing will look different for everyone. 
Anything that encourages\ncommunication, engaged knowledge sharing, and breaking our engineering silos is\ngood.” – [Paul Slaughter](/company/team/#pslaughter)\n\nCover image by [Jonathan Mast](https://unsplash.com/@jonathanmast) on [Unsplash](https://unsplash.com/photos/RW6Wz9QaoKk)\n{: .note}\n",[744,696,722,9],{"slug":914,"featured":6,"template":699},"agile-pairing-sessions","content:en-us:blog:agile-pairing-sessions.yml","Agile Pairing Sessions","en-us/blog/agile-pairing-sessions.yml","en-us/blog/agile-pairing-sessions",{"_path":920,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":921,"content":927,"config":933,"_id":935,"_type":13,"title":936,"_source":15,"_file":937,"_stem":938,"_extension":18},"/en-us/blog/align-business-strategy-and-app-delivery",{"title":922,"description":923,"ogTitle":922,"ogDescription":923,"noIndex":6,"ogImage":924,"ogUrl":925,"ogSiteName":685,"ogType":686,"canonicalUrls":925,"schema":926},"Deliver business value at the speed of business","Read here on how DevOps helps delivering business value with faster cycle times","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/align-business-strategy-and-app-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deliver business value at the speed of business\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-23\",\n      }",{"title":922,"description":923,"authors":928,"heroImage":924,"date":930,"body":931,"category":718,"tags":932},[929],"John Jeremiah","2019-04-23","\n\nWhat’s the point of DevOps and digital transformation? 
Is this just another “IT project”\nwith limited business value, or will they deliver _real_ value to the business?\n\nThe goal of digital transformation is to change the business with new models,\nnew services, new value, and new ways to connect with customers. Consider the\nobservations of the\n[World Economic Forum’s Digital Transformation Initiative](http://reports.weforum.org/digital-transformation/),\nwhere they argue that a digital transformation will lead to improved customer\nexperience and outcomes, efficiencies, and business models. One of the key ways\nto enable these changes is “agile and digital-savvy leadership” and a technology\ninfrastructure that is ready to respond to changing demands.\n\n**In order to succeed in your digital transformation strategy, you must be able\nto transform your technology delivery processes and platforms.**\n\n## What’s the solution?\n\nThe good news is that many of the key techniques to facilitate faster and more\nresponsive delivery are known. For the past decade, enterprises large and small\nhave found success with adopting DevOps principles to extend Agile project\nplanning to deliver business value at the speed of business. In many ways, DevOps\nis one of the key enablers to unlock the velocity needed for delivery teams to\nrespond to rapidly changing business objectives.\n\nAny sort of IT transformation, such as DevOps, must be defined as a business\ninitiative with tangible business outcomes. However, too often, initiatives like\nAgile and DevOps are relegated to be backbench, IT-focused projects that are set\nup to fail. If your Agile or DevOps transformation project isn’t closely linked\nto business objectives, or if it doesn’t have business stakeholders, then it’s\ntime to go back to the drawing board and re-make the business case to sell the\nvision. 
As IT leaders, you cannot go it alone.\n\n## Taking a closer look at your value stream\n\nSo, how do you operate and stay focused on business objectives as you accelerate\napplication delivery?  I’ve heard from many customers who find their\n“portfolio planning” process and tools disconnected from the actual work developers\nand delivery teams do. The problem they face is not having visibility into the\ncadence and delivery of new features and capabilities that the business has requested.\nWhile they try to integrate disparate tools to keep track of everything, they\nultimately end up using a patchwork spreadsheets/PowerPoint hybrid to create\ndashboards and reports in the hopes of keeping executives informed. It’s a waste\nof effort, error prone, and frustrating to pull all the data together over and over again.\n\nTo solve this alignment puzzle you need three things:\n\n1. Effective visibility and traceability between business initiatives, coding, and delivery.\n1. Commitment to Agile planning and prioritization.\n1. Automation of manual, error-prone tasks, such as testing, configuring, reporting, and tracking activities.\n\nLet's dig into those:\n\n### Increase visibility\n\nThe first step in achieving success is breaking through the barriers to\ncommunication and collaboration in your organization. Too many different tools,\nspreadsheets, PowerPoint decks, and islands of information create friction and\nconfusion. You need to consider how you can align your policies, processes, and\ntechnology enablers to encourage collaboration, sharing, and visibility into business\ninitiatives. Only then will you be able to respond to the rapidly changing business needs.\n\n### Simplify workflows\n\nIf visibility is the first step in your transformation, then your second step is\nembracing the reality that yesterday’s business plans and priorities may well\nchange tomorrow. 
The days of annual planning and long-running projects that\ndeliver only after months of effort are gone. The pace of change in the market\ndemands a comparable level of flexibility in our planning and prioritization.\n\n### Favor automation\n\nIf your most valuable assets are your people, then don’t ask them to waste their\ntime and talents on routine manual effort. To improve your ability to accelerate\napplication delivery, you need to examine your processes and policies and\nautomate your manual, repetitive, low-value tasks. This will unlock the untapped\npotential in your team while speeding up your pipeline and reducing error rates.\nThe power of [modern automation](/blog/application-modernization-best-practices/) is a\nkey driver to deliver at the speed of business.\n\nA successful transformation is not only possible, but also crucial to long-term\nsuccess in a market that is moving at a radically faster pace than it was a few\nyears ago. Now is the time to start.\n\nJoin us for an upcoming webinar in which we'll learn how\nsoftware delivery leaders play a vital role in the success of digital transformations.\n\n[Register now](/webcast/justcommit-reduce-cycle-time/)\n{: .alert .alert-gitlab-purple .text-center}\n",[722,9],{"slug":934,"featured":6,"template":699},"align-business-strategy-and-app-delivery","content:en-us:blog:align-business-strategy-and-app-delivery.yml","Align Business Strategy And App Delivery","en-us/blog/align-business-strategy-and-app-delivery.yml","en-us/blog/align-business-strategy-and-app-delivery",{"_path":940,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":941,"content":947,"config":954,"_id":956,"_type":13,"title":957,"_source":15,"_file":958,"_stem":959,"_extension":18},"/en-us/blog/align-engineering-security-appsec-tests-in-ci",{"title":942,"description":943,"ogTitle":942,"ogDescription":943,"noIndex":6,"ogImage":944,"ogUrl":945,"ogSiteName":685,"ogType":686,"canonicalUrls":945,"schema":946},"How Developer-Centric AppSec 
Testing Transforms DevOps Teams","Find and fix security bugs faster by implementing developer-centric application security testing in the CI pipeline. And the bonus? Engineering and security will finally be better aligned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681513/Blog/Hero%20Images/stackhawk.jpg","https://about.gitlab.com/blog/align-engineering-security-appsec-tests-in-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How developer-centric AppSec testing can dramatically change your DevOps team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joni Klippert\"}],\n        \"datePublished\": \"2020-08-21\",\n      }",{"title":948,"description":943,"authors":949,"heroImage":944,"date":951,"body":952,"category":832,"tags":953},"How developer-centric AppSec testing can dramatically change your DevOps team",[950],"Joni Klippert","2020-08-21","\n\nSoftware development has accelerated dramatically over the past decade. As [DevOps](/topics/devops/) became pervasive, companies went from shipping software monthly to shipping software to production frequently throughout the day. This happened as engineering teams took ownership of the deployment, performance, and resilience of their software. \n\nAnd it has paid off. Companies that have adopted DevOps are deploying software significantly faster, ultimately driving business value as innovation is more rapidly delivered to customers.\n\nSecurity, however, did not keep up. Security teams typically fell into one of two positions - the blocker of frequent deployments or the team perpetually bringing up issues in last month’s work. The need for a shift in the security model is widely known. 
It was the subject of the [2019 Black Hat Conference keynote](https://www.blackhat.com/us-19/briefings/schedule/index.html#every-security-team-is-a-software-team-now-17280), stats from GitLab’s [2020 Global DevSecOps Survey](https://about.gitlab.com/resources/downloads/2020-devsecops-report.pdf) make this obvious, and we’ve [shared our opinions](https://www.stackhawk.com/blog/application-security-is-broken/) at StackHawk.\n\nI believe there is a solution (or at least a *huge* step in the right direction)... developer-centric [application security](/topics/devsecops/) tooling in the CI pipeline.\n\n## The CI pipeline aligns engineering and security\n\nWhile some in the industry have been debating the term DevSecOps, leading companies have started adopting developer-first security tooling that brings alignment through the CI pipeline. Instrumented correctly, it ensures that security bugs are caught before they hit production and that the fix cycle is drastically shortened.\n\nThe legacy model has security teams running application security tests against production environments. These sort of checks are great if they are your backstop. But if this is the primary way of assessing your application’s security posture, you need to catch up with modern engineering practices. \n\nModern teams are running checks on each microservice that makes up the customer facing application, catching bugs in pipeline, and equipping developers with the information to self serve fixes and triage issues. Fix times are significantly shorter, as developers are still in the context of the code they were working on. By testing microservices vs. the end state application, the underlying bugs are much easier to find and fix. And with developer-centric tooling, developers can fix bugs themselves instead of cycling through siloed internal processes. This structure better aligns each function with their best skill sets. 
Engineers know the application the best and are most equipped to fix, and security teams are able to focus on strategy instead of Jira ticket creation.\n\nThe key is to get the instrumentation right (read: don’t break the build for stupid stuff).\n\n## Application security tests in CI\n\nThat sounds great in theory, but what does it look like in practice? Getting started is actually more simple than it seems. We suggest adding three application security tests to start:\n\n## Software composition analysis (SCA)\n\nSCA identifies the open source dependencies in your code base and compares that against a database of known security vulnerabilities. Some tools automatically create pull requests to patch outdated libraries. Open source use is exponentially growing, especially with chained dependencies. SCA is incredibly important, but also can be noisy with non-exploitable findings.\n\nSome of the leading vendors in the space are [GitLab](/) and [Snyk](https://snyk.io/), with up and comers like [FOSSA](https://fossa.com/) also worth paying attention to.\n\n## Dynamic application security testing (DAST)\n\nDAST runs security tests against your running application, from localhost to CI to production. The beauty of DAST is that it most closely resembles what an attacker would see, by attacking your running application and reducing false positives. The two things to be sure of as you start testing with DAST is that your scanner is finding all of your paths and API endpoints and that it is able to scan as an authenticated user.\n\nGitLab provides DAST checks for Ultimate tier customers. If you want more robust scanning options and additional functionality to manage and fix bugs, [StackHawk](https://www.stackhawk.com) is the only place to turn (obviously I’m biased here). 
Other solutions include legacy vendors such as [Rapid7](https://www.rapid7.com/) or open source leader [ZAP](https://www.zaproxy.org/).\n\n## Secrets detection\n\nFinally, you’ll want to ensure that you have detection for leaked secrets in code. This tooling looks for credentials, keys, or other secrets that may have unintentionally been committed to the code base by developers. GitLab includes [secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) in their GitLab Ultimate security tooling.\n\n## Getting started\n\nOftentimes, the thought of adding application security tests to the development workflow feels insurmountable. With a long list of priorities, engineering leadership will sometimes put this off. The reality, however, is that it is not that hard.\n\nAt StackHawk, we see many customers completing their first successful scans within 15 minutes of sign up and instrumentation in CI is literally as easy as adding [a few lines of YAML](https://docs.stackhawk.com/continuous-integration/) to your build.\n\nHere is our recommended playbook of how to get started with AppSec in CI. While this is specific to StackHawk, the principles can be applied to other tools as well.\n\n### Step 1: local testing and config\nAfter signing up and grabbing your API key, start iterating on [configuration](https://docs.stackhawk.com/hawkscan/configuration/) while testing against your application on localhost. This allows you to quickly adjust config and get successful authenticated scans running.\n\n### Step 2: non-blocking CI instrumentation\nAfter you’ve ironed out the configuration locally, add the test to your CI pipeline. At this point, it is strongly recommended to instrument as a non-blocking test so that you can triage any existing findings and smooth out any kinks.\n\n#### Step 3: bug triage - fix critical issues in flight, backlog and discuss the rest\nAfter your first non-blocking CI run, start triaging any initial findings. 
Any bugs marked as High criticality should likely be fixed with some sense of urgency. Lows and Mediums should be triaged depending on your application and the bugs, either quickly addressed or added to a backlog for review. Existing findings should not be the blocker for you instrumenting checks to ensure that new bugs don’t get shipped to production.\n\n#### Step 4: switch to blocking tests\nAfter ironing out config locally and in CI, and then triaging initial findings, it is time to finalize the roll out. Switch the StackHawk test to blocking mode to ensure that new security bugs don’t hit production. You can set the scanner to break on High or Medium and High, which depends on your business and the nature of the application. With this in place, you can be confident that production-ready applications have been scanned for security.\n\n## Cultural shifts: it is more than CI\nThe CI pipeline is the natural hingepoint to start aligning engineering and security. A cultural shift, however, is absolutely needed. (If you're doubtful about this, here's a frank look at why [dev and sec don't get along](/blog/developer-security-divide/).) Modern engineering teams recognize that delivering a secure application is part of quality engineering. Engineers aren’t comfortable shipping applications with UI bugs, and they shouldn’t accept security holes either. \n\nSecurity, on the other hand, needs to shift from the blocker to speedy development and to the enabler of safety in an environment of high speed delivery. 
Modern security engineers are ensuring that their teams are working with safe-by-default frameworks, are equipped with developer-centric tooling, and that there are proper integration tests for business logic that can’t be tested by external tooling.\n\nWhile there is significant catch up needed, it is encouraging to see the leading software teams out there testing application security on every build.\n\n## Dig deeper\n\nTo learn more about adding AppSec tests to your CI build, join me at my [How Security Belongs in DevOps](https://sched.co/dUWD) talk at GitLab Commit on August 26th. You can also always sign up for a [free StackHawk trial or demo](https://www.stackhawk.com) or talk to your GitLab sales representative about the security features in GitLab Ultimate. And for the best of both worlds, check out more details on running [automated security testing with StackHawk in GitLab](https://docs.stackhawk.com/continuous-integration/gitlab.html).\n\n_Joni Klippert is founder & CEO of StackHawk, a software-as-a-service company built to help developers find and fix security vulnerabilities in their code. Joni has been building software for developers for more than 10 years, previously serving as VP Product, VictorOps from seed stage to acquisition by Splunk. Joni is a Colorado native and holds an MBA from the University of Colorado. 
She currently lives in Denver with her fiance Jason and Whippet \"Q\"._\n\nCover image by [Adi Goldstein](https://unsplash.com/@adigold1) on [Unsplash](https://unsplash.com)\n{: .note}\n\n\n\n",[108,696,722,787,789,9],{"slug":955,"featured":6,"template":699},"align-engineering-security-appsec-tests-in-ci","content:en-us:blog:align-engineering-security-appsec-tests-in-ci.yml","Align Engineering Security Appsec Tests In Ci","en-us/blog/align-engineering-security-appsec-tests-in-ci.yml","en-us/blog/align-engineering-security-appsec-tests-in-ci",{"_path":961,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":962,"content":968,"config":975,"_id":977,"_type":13,"title":978,"_source":15,"_file":979,"_stem":980,"_extension":18},"/en-us/blog/all-aboard-merge-trains",{"title":963,"description":964,"ogTitle":963,"ogDescription":964,"noIndex":6,"ogImage":965,"ogUrl":966,"ogSiteName":685,"ogType":686,"canonicalUrls":966,"schema":967},"How starting merge trains improve efficiency for DevOps","No more queuing and waiting for pipeline results! Read how merge trains will speed up your deployments while making sure master stays green.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678419/Blog/Hero%20Images/merge_trains.jpg","https://about.gitlab.com/blog/all-aboard-merge-trains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How starting merge trains improve efficiency for DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-01-30\",\n      }",{"title":963,"description":964,"authors":969,"heroImage":965,"date":971,"body":972,"category":832,"tags":973},[970],"Orit Golowinski","2020-01-30","\nA large percentage of a developer's day is spent updating their branches and rebasing, they are essentially \"racing\" their teammates to get their merge requests merged. 
Keeping the master branch green is critical for [continuous delivery](/topics/continuous-delivery/). When the production build breaks, it means your new code isn't going live, which impacts users and revenue. The only way to be 100% sure the master branch stays green when new code merges is to run the pipeline using the latest version of the master branch. For teams that have a high volume of merges, this can be difficult or even impossible. In the time it takes the pipeline to complete one code change, other changes can get merged to master with the potential for conflict. The only way to mitigate this is to queue and sequence the changes so that once a production pipeline starts, other code doesn't get merged ahead of that change. \n\n## What are merge trains and how do they help?\n\n Merge trains introduce a way to order the flow of changes into the target branch (usually master). When you have teams with a high number of changes in the target branch, this can cause a situation where during the time it takes to validate merged code for one change, another change has been merged to master, invalidating the previous merged result.\n\nBy using merge trains, each merge request joins as the last item in that train with each merge request being processed in order. However, instead of queuing and waiting, each item takes the completed state of the previous (pending) [merge ref](https://gitlab.com/gitlab-org/gitlab-foss/issues/47110) (the merge result of the merge), adds its own changes, and starts the pipeline immediately in parallel under the assumption that everything is going to pass.\n\nIf all pipelines in the merge train are completed successfully, then no pipeline time is wasted on queuing or retrying. 
Pipelines invalidated through failures are immediately canceled, the MR causing the failure is removed, and the rest of the MRs in the train are requeued without the need for manual intervention.\n\nAn example of a merge train:\n\n![Diagram of merge trains](https://about.gitlab.com/images/blogimages/merge_trains-1.png){: .shadow}\n\nMR1 and MR2 join a merge train. When MR3 attempts to join, the merge fails and it is removed from the merge train. MR4 restarts at the point that MR3 fails, and attempts to run without the contents of MR3.\nMR3 will remain open in failed state, so that the author can rebase and fix the failure before attempting to merge again.\n\nHere is a demonstration video that explains the advantage of the merge train feature. In this video, we'll simulate the common problem in a workflow without merge trains, and later, we resolve the problem by enabling a merge train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## How the merge trains feature has evolved so far\n\nAfter releasing [merge trains](/releases/2019/06/22/gitlab-12-0-released/#sequential-merge-trains) in GitLab 12.0, we immediately started to use this feature internally, and collected a lot of valuable feedback which helped us to improve and enhance the feature.\n\nWe started by tuning the [merge train concurrency](https://gitlab.com/gitlab-org/gitlab/issues/31692). 
We understood that while merge trains is a feature that is designed to improve efficiency by making sure that master stays green, it can also create an unwanted bottleneck that slows down productivity if your merge requests needs to wait in a long queue in order to get merged.\n\nWe also noticed that many developers were \"skipping the line\" and merging their changes immediately because they did not understand the effect that merging immediately has on other users, so we added a [warning](https://gitlab.com/gitlab-org/gitlab/issues/12679) to clarify this common misunderstanding. We intentionally left the option to still \"merge immediately\" since we also understand the importance of an urgent merge request, such as a \"hot fix\" that must be able to skip to the front of the merge train. Another improvement was the ability to [“squash & merge” as part of the merge train](https://gitlab.com/gitlab-org/gitlab/issues/13001) in order to maintain a clean commit history.\n\nHere is a demonstration video that explains how squash & merge works with merge trains.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/pA5SfHwlq0s\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## What's next\n\nWe plan to add more important features to the support of merge trains. The first is that [merge trains should support fast-forward merge](https://gitlab.com/gitlab-org/gitlab/issues/35628). This could help solve a fundamental contention problem of fast-forward merges: The CI pipeline must be run every time the merge request is rebased, and the merge request must be rebased every time master changes – which is frequently! 
This problem significantly limits the frequency with which merge requests can be merged.\n\nThe second feature, [API support for merge trains](https://gitlab.com/gitlab-org/gitlab/issues/32665), will extend the ability to automate your workflows using merge trains.\n\nWe want to hear from you! Tell us how merge trains have improved your workflow, or give us more insight into how we can improve merge trains to work better for you. [Give us your feedback by commenting here](https://gitlab.com/groups/gitlab-org/-/epics/2408).\n\nCover image by [Vidar Nordli-Mathisen\n](https://images.unsplash.com/photo-1525349769815-0e6ba4e0bbdd?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1611&q=80) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[108,974,9,834],"demo",{"slug":976,"featured":6,"template":699},"all-aboard-merge-trains","content:en-us:blog:all-aboard-merge-trains.yml","All Aboard Merge Trains","en-us/blog/all-aboard-merge-trains.yml","en-us/blog/all-aboard-merge-trains",{"_path":982,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":983,"content":989,"config":994,"_id":996,"_type":13,"title":997,"_source":15,"_file":998,"_stem":999,"_extension":18},"/en-us/blog/an-ode-to-stable-counterparts",{"title":984,"description":985,"ogTitle":984,"ogDescription":985,"noIndex":6,"ogImage":986,"ogUrl":987,"ogSiteName":685,"ogType":686,"canonicalUrls":987,"schema":988},"An ode to stable counterparts","Our workflow model streamlines decision making, cultivates trust, and promotes cross-functional collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679002/Blog/Hero%20Images/stablecounterparts.jpg","https://about.gitlab.com/blog/an-ode-to-stable-counterparts","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An ode to stable counterparts\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": 
\"2018-10-16\",\n      }",{"title":984,"description":985,"authors":990,"heroImage":986,"date":991,"body":992,"category":718,"tags":993},[852],"2018-10-16","\n_They said [this model](/handbook/leadership/#stable-counterparts) would help us thrive._\n_To foster trust, familiarity, and drive,_\u003Cbr/>\n_We would work side-by-side, knitting our workflows_\u003Cbr/>\n_And supporting one another in our highs and lows._\u003Cbr/>\n\n_Before we embarked on our journey, I fretted and fussed._\u003Cbr/>\n_With a furrowed brow, I felt a careful trust_\u003Cbr/>\n_In my leadership who often discussed_\u003Cbr/>\n_The need to readjust lest we combust._\u003Cbr/>\n\n_We shipped and scaled and detailed_\u003Cbr/>\n_Our results._\u003Cbr/>\n_Seamlessly soaring towards Two and Twenty,_\u003Cbr/>\n_Our managers said, “In their progress, that_\u003Cbr/>\n_team exults.”_\u003Cbr/>\n_We collaborate, update, and accelerate with flair._\u003Cbr/>\n\n_And now I must declare:_\u003Cbr/>\n_I have drawn the ace of hearts_\u003Cbr/>\n_With my team of stable counterparts!_\u003Cbr/>\n\nAt GitLab, we adopted a stable counterparts model to facilitate cross-functional\nconnections in the hope that working with the same people would increase the\nspeed of communication, build trust, and encourage iteration. In a stable\ncounterparts model, every team works with the\n[same team members](/handbook/engineering/development/dev/create/source-code-be/#stable-counterparts),\nincluding frontend engineers, UX designers, and test automation engineers, for\neach release, creating a smaller team within GitLab.\n\n## The benefits of stable counterparts\n\nThe ability to build long-term relationships is the foundational benefit of\nhaving stable counterparts. Repeated interactions helps us understand personal\nworkflows and communication styles, so we know how to most effectively work with\nour counterparts. 
Knowing how to best communicate with someone is a great benefit\nwhen working in high pressure situations or resolving conflict. Consistent\ncollaboration means faster results and more efficient processes.\n\nIn addition to building long-term relationships, we’ve noticed a few other\ninteresting benefits to having stable counterparts.\n\n- **Enabling a faster workflow**: There are some product areas that are easy to\nunderstand because every team member engages with them, but there are some that\nare challenging, such as [CI](https://docs.gitlab.com/ee/ci/),\n[security](https://docs.gitlab.com/ee/user/project/merge_requests/#security-reports),\nor [Kubernetes](https://docs.gitlab.com/ee/user/project/clusters/index.html),\nthat require domain knowledge that can be harder for a team member to quickly\nfathom without a certain amount of pre-knowledge. When a stable counterpart has\ndeveloped deeper understanding in complex areas, others know who to quickly\nconsult when confronted with a specific technical challenge, an insight that\ndrives velocity since team members are no longer blocked trying to determine who\ncan offer assistance.\n- **Promoting long-term brainstorming**: In traditional workflow models, product\nmanagers often have individual meetings with engineering managers, UX designers,\nand frontend managers in which brainstorming through ideas and talking about\nlong-term goals happens in silos. With stable counterparts, discussion benefits\nfrom cross-functional perspective, enhancing ideas, and igniting creativity,\nwhich can take place over several milestones.\n- **Increasing familiarity and comfort with problems**: Working with a rotating\nset of team members can result in a lack of comprehensive historical knowledge\non an issue, causing delays while team members digest information and become\nacquainted with the state of a solution. 
By working with the same people over\nseveral releases, we’re able to provide context early and implement learnings\nto solve problems in the right way.\n\n## Let’s talk about workflow impact\n\nWorking with stable counterparts has helped the team develop a faster and more\niterative workflow. We’re more focused in that we can pick up on discussions and\nitems that we tinkered with in previous releases. We now approach problems with\na deeper understanding, since we have long-term insight into why changes are\nimportant. Taking context from release to release and retaining that knowledge\nensures that we develop thoughtful solutions, especially since we feel a higher\nsense of ownership of projects because we’ve been involved throughout every stage.\n\nThis model has also resulted in better dependency management. We spend a lot of\ntime doing upfront investment into project planning and prioritization, so teams\nhave visibility into collaboration with backend and frontend. This makes it\neasier to see whether we need more backend or frontend resources in certain areas\nand to allocate engineers as needed.\n\n## Sounds great, but what are the drawbacks?\n\nThis model could lead to engineers feeling like they’re feature factories, so\nleadership must actively work to keep their team on an edge so that there’s a\nhealthy balance between product features and other tasks that are more complex\nor exciting.\n\nWhen working with stable counterparts, there’s a potential for conflict and\npersonality issues. If personal communication styles or workflows don’t align,\ninteractions can become tense and handoffs can be fraught with friction. When\npairing stable counterparts, leadership should consider personalities,\ncommunication styles, and workflows to ensure that a team, at baseline, can work\nwell together.\n\nWorking with the same people for too long means that we’re not exposed to a\nbroader audience and may not have fresh ideas come into conversations. 
It’s\npossible that teams become comfortable with the way things are and ideas are no\nlonger questioned. We haven’t encountered this problem at GitLab yet, since we’re\n[growing](/jobs/) so quickly that every team frequently has a change or new addition,\nwhich is accompanied by a variety of new questions and unique feedback. For\nteams that don’t have as much growth, it can be useful to invite other team\nmembers to provide perspective and question long-held beliefs.\n\n## Advice for other teams\n\nIf your team is interested in adopting a similar model, we suggest starting\nsmall and breaking teams into smaller components. For teams that are unaccustomed\nto an interdisciplinary model with agile teams, it can be a difficult adjustment,\nso it’s important that teams are structured around either a specific initiative\nor area of the product. To determine whether this is a model that could benefit\nyour organization, consider selecting a problem and pairing the same 4-5 team\nmembers, including a product manager, a UX designer, and a few engineers, for\nseveral releases until the problem is solved. Working together for several\nreleases helps team members nurture a strong, stable relationship, so it’s\nimportant that they’re given enough time to learn about and from each other.\n\nAlthough stable counterparts has worked well for GitLab’s workflow, it’s\nimportant to be sure that this is the model that fits _your_ company’s needs.\nDeveloping a workflow depends on strategy, targets, and the maturity level of an\norganization. These are all variables that need to be considered when building\nor changing a process. This setup wouldn’t have worked for GitLab 12 months ago,\nbut it works now, so continue to experiment and examine options as your team and\norganization develop. 
Whether you pursue a stable counterparts model or some other\nsetup, remember to select an approach that complements your organization and the\nproduct you’re building.\n\n_The writer is grateful to [Jeremy Watson](/company/team/#d3arWatson),\n[Liam McAndrew](/company/team/#lmcandrew), [John Jeremiah](/company/team/#j_jeremiah), and\n[Tim Zallman](/company/team/#tpmtim) for sharing their experiences as stable counterparts._\n",[9,790,696],{"slug":995,"featured":6,"template":699},"an-ode-to-stable-counterparts","content:en-us:blog:an-ode-to-stable-counterparts.yml","An Ode To Stable Counterparts","en-us/blog/an-ode-to-stable-counterparts.yml","en-us/blog/an-ode-to-stable-counterparts",{"_path":1001,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1002,"content":1008,"config":1015,"_id":1017,"_type":13,"title":1018,"_source":15,"_file":1019,"_stem":1020,"_extension":18},"/en-us/blog/atlassian-acquires-agilecraft",{"title":1003,"description":1004,"ogTitle":1003,"ogDescription":1004,"noIndex":6,"ogImage":1005,"ogUrl":1006,"ogSiteName":685,"ogType":686,"canonicalUrls":1006,"schema":1007},"What’s your plan?"," GitLab integrates planning every step of the way","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680500/Blog/Hero%20Images/planpost.jpg","https://about.gitlab.com/blog/atlassian-acquires-agilecraft","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What’s your plan?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2019-03-18\",\n      }",{"title":1003,"description":1004,"authors":1009,"heroImage":1005,"date":1011,"body":1012,"category":300,"tags":1013},[1010],"GitLab","2019-03-18","\n\nToday’s acquisition of AgileCraft by Atlassian brings up an interesting discussion: \nWhat’s the role of planning in today’s fast-moving software development lifecycle?\n\nIn DevOps, planning can’t be an after-thought or something 
only thought about at \nthe beginning. [Planning needs to be agile](https://about.gitlab.com/solutions/agile-delivery/), \nand integrated into what’s happening every day in the modern software shop. \nLike a quote from the Beatles song, “Life is what happens to you while you’re busy making other plans.”\n\nGitLab has democratized planning, making it an integral part of the software \ndevelopment workflow, with out-of-the-box project management, kanban boards, \nepics, time-tracking, and agile portfolio management - with \n[much more to come](https://about.gitlab.com/direction/plan/). \nMore importantly, though, GitLab’s planning features are intimately linked to \nall of the other [stages of software development](https://about.gitlab.com/stages-devops-lifecycle/). \nDevelopers, architects, and product managers can plan and re-plan together, \ncollaboratively and concurrently, with full visibility to the entire cycle.\n\nDon’t get us wrong - AgileCraft is a deep, well-thought out enterprise planning \ntool. 
But with GitLab, in addition to planning, you get an entire software development lifecycle tool out of the box.\n\n",[744,1014,9],"news",{"slug":1016,"featured":6,"template":699},"atlassian-acquires-agilecraft","content:en-us:blog:atlassian-acquires-agilecraft.yml","Atlassian Acquires Agilecraft","en-us/blog/atlassian-acquires-agilecraft.yml","en-us/blog/atlassian-acquires-agilecraft",{"_path":1022,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1023,"content":1029,"config":1036,"_id":1038,"_type":13,"title":1039,"_source":15,"_file":1040,"_stem":1041,"_extension":18},"/en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"title":1024,"description":1025,"ogTitle":1024,"ogDescription":1025,"noIndex":6,"ogImage":1026,"ogUrl":1027,"ogSiteName":685,"ogType":686,"canonicalUrls":1027,"schema":1028},"Automate tedious coding tasks with GitLab Duo Workflow","See how agentic AI can reduce time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662465/Blog/Hero%20Images/GitLab_Duo_Workflow_Unified_Data_Store__1_.png","https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate tedious coding tasks with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Park\"}],\n        \"datePublished\": \"2025-05-06\",\n      }",{"title":1024,"description":1025,"authors":1030,"heroImage":1026,"date":1032,"body":1033,"category":764,"tags":1034},[1031],"Jeff Park","2025-05-06","Working with large codebases often means spending significant time on repetitive tasks that, while necessary, don't really push your projects forward. The good news is that these tasks are great candidates to be completed with AI. 
Reducing the time spent on them will free you up to work on more important problems that you’re actually excited to tackle. With GitLab Duo Workflow, the time spent on these tasks will go from hours to minutes.\n\n[Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) is a powerful new agentic solution, currently in private beta, that lives in VS Code and is designed to help you complete complex development tasks. While many AI coding assistants are focused on helping developers write code, Duo Workflow understands your project structure, reads your files, and can make coordinated changes across your entire codebase.\n\nI created a demonstration that showcases how Duo Workflow can transform a tedious coding task into a streamlined process that saves you time and mental energy.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1081627484?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Automate tedious coding tasks with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Implementing a new lint rule\n\nIn this demo, we tackle a common scenario that many developers face: implementing a new lint rule and then updating multiple files across the codebase to comply with this rule. The specific issue involves validation errors occurring in several project files that need to be addressed consistently.\n\nRather than manually identifying and modifying each affected file one by one – a process that could take hours depending on the size of your codebase – we'll see how Duo Workflow can:\n\n1. Read and understand the details from an issue\n2. 
Analyze the project structure to identify affected files\n3. Create a comprehensive plan to implement the necessary changes\n4. Draft a new lint rule to prevent future occurrences\n5. Make consistent code changes across all relevant files\n6. Stage the changes for your review before any commits are made\n\nA simple prompt initiates the process:\n\n\"Read through issue #1 in this project and submit code changes to resolve it. Be sure to look at each tool file and make all appropriate changes.\"\n\nFrom there, Duo Workflow takes over – reading the issue, analyzing the files, creating a plan, and implementing the solution – all while keeping me informed of its progress and reasoning.\n\n## Why this matters for your development process\n\nWhat's particularly powerful about Duo Workflow is how it maintains awareness of this wider context throughout the entire process. It's not just making text replacements based on a large language model's training data – it's understanding the code, making intelligent decisions, and proposing a complete solution that you maintain full control over.\n\nThis approach offers several key benefits:\n\n* **Consistency in implementation:** Apply changes uniformly across files\n* **Time savings:** Focus your energy on creative problem-solving rather than repetitive tasks\n* **Reduced context switching:** Complete complex tasks without leaving your IDE\n* **Keeping a human in the loop:** Review all proposed modifications before committing\n\n## What's next\n\nGitLab Duo Workflow is part of our work to bring AI-powered capabilities to every stage of the software development lifecycle. 
While this demo focuses on code editing, the same approach can be applied to various development tasks:\n\n* Implementing new features based on issue descriptions\n* Fixing bugs with comprehensive test coverage\n* Refactoring legacy code to modern standards\n* Creating documentation from codebase analysis\n\nWe believe that by automating repetitive tasks, Duo Workflow helps you focus on what matters most – solving interesting problems and creating innovative solutions for your users.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. [Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/agent-platform/)\n\n## Learn more\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/)\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n",[766,495,834,767,1035,9],"tutorial",{"slug":1037,"featured":90,"template":699},"automate-tedious-coding-tasks-with-gitlab-duo-workflow","content:en-us:blog:automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","Automate Tedious Coding Tasks With Gitlab Duo 
Workflow","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"_path":1043,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1044,"content":1049,"config":1054,"_id":1056,"_type":13,"title":1057,"_source":15,"_file":1058,"_stem":1059,"_extension":18},"/en-us/blog/avoiding-foreclosure-on-your-technical-debt",{"title":1045,"description":1046,"ogTitle":1045,"ogDescription":1046,"noIndex":6,"ogImage":924,"ogUrl":1047,"ogSiteName":685,"ogType":686,"canonicalUrls":1047,"schema":1048},"How to avoid foreclosure on your technical debt","There’s no need to be embarrassed — we all have technical debt. Here’s how you pay it off.","https://about.gitlab.com/blog/avoiding-foreclosure-on-your-technical-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid foreclosure on your technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-29\",\n      }",{"title":1045,"description":1046,"authors":1050,"heroImage":924,"date":1051,"body":1052,"category":718,"tags":1053},[929],"2019-04-29","\n\nHow much debt can you afford? We all live with some form of debt, whether it’s a student loan, a car loan, a mortgage, or a credit card balance. Debt doesn’t have to be a bad thing. It can be a tool that gives us leverage and flexibility to make large purchases. But there are limits to how much debt is reasonable and that’s where people get into trouble. If they take on too much debt, bad things can happen. \n\nWhat does this have to do with GitLab? 
Everything.\n\n## What is technical debt?\n\nAccording to [Martin Fowler’s excellent summary on technical debt](https://martinfowler.com/bliki/TechnicalDebt.html), it seems that [Ward Cunningham coined the term](https://www.youtube.com/watch?v=pqeJFYwnkjE) around 1993 as a metaphor to describe a typical pattern that occurs on software projects. Technical debt is a pattern in which a development team does not have enough time, information, or capacity to refine and refactor their code, so their architecture, implementation, and testing may be incomplete. The challenge with technical debt is similar to financial debt in that it doesn’t magically go away. Unless it is managed and paid down, technical debt will grow over time, just like the balance on your credit card bill.\n\n## How to reduce technical debt\n\nYou may feel overwhelmed but there is a reason to be optimistic. The power of rapid, continuous delivery combined with small, incremental changes can help you manage your technical debt and avoid “foreclosure.” Here are three things you can do today to get your “technical finances” in order:\n\n1. **Find your technical debt and document it.**  It’s hard to pay off all your bills if you don’t know what they are. Begin this process by creating a list of issues that capture your specific technical “bills.” Assign them `technical debt` and `priority` labels. You probably won’t be able to pay them all off at one time, but now you know where to start.\n\n1. **Embrace small changes.**  At GitLab, we embrace [Minimum Viable Change (MVC)](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc). The goal of MVC is to make small, incremental improvements. Your goal is to pay down your debt one micro payment at a time.  \n\n1. **Let continuous delivery automate your payments.**  You can’t automate the improvements, but you can leverage CI/CD automation to streamline the process of testing, validating, and deploying code changes for you. 
Continuous delivery removes the friction and bottlenecks between your developers and the “bank.”  \n\nTechnical debt is a reality in almost every software product in the world. The point about technical debt isn’t how to avoid it, but how to _manage_ it so that you’re not in a situation where you are forced to foreclose on your project because the technical debt is out of control. The tools are readily available. The question is: Are you ready to start managing your technical debt? [Just commit](/blog/strategies-to-reduce-cycle-times/) to your future.\n",[722,9],{"slug":1055,"featured":6,"template":699},"avoiding-foreclosure-on-your-technical-debt","content:en-us:blog:avoiding-foreclosure-on-your-technical-debt.yml","Avoiding Foreclosure On Your Technical Debt","en-us/blog/avoiding-foreclosure-on-your-technical-debt.yml","en-us/blog/avoiding-foreclosure-on-your-technical-debt",{"_path":1061,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1062,"content":1068,"config":1075,"_id":1077,"_type":13,"title":1078,"_source":15,"_file":1079,"_stem":1080,"_extension":18},"/en-us/blog/battling-toolchain-technical-debt",{"title":1063,"description":1064,"ogTitle":1063,"ogDescription":1064,"noIndex":6,"ogImage":1065,"ogUrl":1066,"ogSiteName":685,"ogType":686,"canonicalUrls":1066,"schema":1067},"Battling toolchain technical debt","DevOps teams can hinder the software development lifecycles and application performance if they let their toolchains become unruly. 
Read how GitLab can help reduce that technical debt.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667845/Blog/Hero%20Images/gl15.jpg","https://about.gitlab.com/blog/battling-toolchain-technical-debt","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Battling toolchain technical debt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-06-21\",\n      }",{"title":1063,"description":1064,"authors":1069,"heroImage":1065,"date":1071,"body":1072,"category":693,"tags":1073},[1070],"Sandra Gittlen","2022-06-21","\nDevelopers love their tools. Operations teams love their tools. And security teams love their tools. As Dev, Sec, and Ops consolidate onto a single DevOps platform, toolchain technical debt becomes exponentially more costly and complex.\n\n“Tools should be in the background enabling excellent development, operations, and security practices. However, DevOps teams are often led by their tools rather than the other way around and that can hinder all aspects of the software development lifecycle (SDLC),” says [Cindy Blake](https://gitlab.com/cblake), CISSP, director of product and solutions marketing at GitLab.\n\nAn April 2022 Gartner® report titled “Beware the DevOps Toolchain Debt Collector” notes that “many organizations find themselves with outdated, poorly governed, and unmanageable toolchains as they scale DevOps initiatives.”\n\nOne of the key findings, according to Gartner, is that “most organizations create homegrown toolchains, often leveraging the tools beyond their functional design. 
This not only leads to a fragmented toolchain, but also creates complications when tooling needs to be scaled, replaced, or updated.”\n\nToolchain technical debt introduces complexity as companies shift critical tasks such as reliability, governance, and compliance left in the SDLC.\n\n> Discover how GitLab 15 can help your team deliver secure software, while maintaining compliance and automating manual processes.\nSave the date for our GitLab 15 [launch event](https://page.gitlab.com/fifteen) on June 23rd!\n\n## No time for technical debt\n\nFew DevOps teams give toolchain upkeep the time and attention it requires. According to [GitLab’s 2021 DevSecOps\nsurvey](/images/developer-survey/gitlab-devsecops-2021-survey-results.pdf), nearly two-thirds of survey respondents, 61%, said they spend 20% or less of their time on toolchain integration and maintenance each month.\n\n“Developers face challenges and time constraints while maintaining these complex, stand-alone tool siloes, building fragility and technical debt that the [infrastructure and operations] leader has to deal with,” Gartner states. The research firm adds, “These outdated toolchains further increase overhead costs, magnify technical risks, add operational toil, and limit business agility.”\n\nBlake agrees: “Complex toolchains inhibit the ability to govern the software development and deployment process. Policies must be managed across tools and visibility into code changes and changes to its surrounding infrastructure become difficult to see and track. Time is wasted on managing the toolchain instead of value-added work.”\n\n## Getting purpose-driven\nThe remedy to toolchain sprawl and subsequent debt is to change strategy. 
Instead of putting energy into figuring out how to maintain one-off tools, DevOps teams should focus on how to enable processes and policies that support simplicity, control, and visibility across the SDLC.\n\n“These are the characteristics needed to meet reliability, governance, and compliance demands. A united platform like GitLab helps you do that,” Blake says.\n\nGartner states: “Successful infrastructure and operations leaders reduce technical debt and sustainably scale DevOps toolchain initiatives across the organization by using a prioritized, iterative strategy that minimizes friction in making changes to toolchains and more quickly delivers customer value.”\n\nAdopting a purpose-built platform instead of a complex and ad-hoc toolchain also eases an organization’s ability to automate the SDLC. “Automation abstracts complexity away from the developer and provides guard rails so DevOps teams gain greater efficiency, accuracy, and consistency,” Blake says. In addition, automation reduces the audit footprint in terms of what needs oversight and inspection.\n\nPlatforms also support automation throughout operations, including building and\ntesting infrastructure as code, so that “you can eliminate the variables when you’re trying to debug an application,” she says. This speeds troubleshooting response times and reduces application downtime.\n\nFor instance, GitLab, the One DevOps Platform, features [dependency\nlists](https://docs.gitlab.com/ee/user/application_security/dependency_list/), also known as software bill of materials (SBOM), that show which dependencies were used and help to identify where problems exist. “GitLab also helps you avoid problems altogether by consistently scanning dependencies according to policies and compliance standards that the platform provides,” Blake says. DevOps teams can easily see what changes were made when and by whom. 
“That visibility is critical when trying to resolve issues and prevent them from happening again,” she says.\n\n## Reclaim your DevOps team’s time\nBy adopting a single DevOps platform, organizations can reclaim developer, security, and operations time that has been spent stitching tools together or optimizing for one developer’s tool, and then backtracking through toolchains when an application breaks because those tools can’t co-exist.\n\n“DevOps teams have a lot on their plates and trying to manage unruly toolchains is simply a waste of time. You should be creating state-of-the-art software, not manually integrating and maintaining legacy tools,” Blake says.\n\nShe emphasizes that GitLab is not “rip and replace”; it’s a platform where everything needed for DevOps comes together in one place. IT leadership benefits from this united approach as well. [Value stream\nanalytics](/solutions/value-stream-management/) provide insight into your end-to-end software throughput, helping optimize IT resources most efficiently and enabling a flexible, responsive business outcome. “We meet DevOps teams where they are and put the user – whether they be a developer, operations, or security professional – in the center of the platform,” she says.\n\n[Try GitLab Ultimate for free](/free-trial/\n).\n\n_GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally and is used herein with permission. 
All rights reserved._\n",[722,1074,9],"performance",{"slug":1076,"featured":6,"template":699},"battling-toolchain-technical-debt","content:en-us:blog:battling-toolchain-technical-debt.yml","Battling Toolchain Technical Debt","en-us/blog/battling-toolchain-technical-debt.yml","en-us/blog/battling-toolchain-technical-debt",{"_path":1082,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1083,"content":1089,"config":1097,"_id":1099,"_type":13,"title":1100,"_source":15,"_file":1101,"_stem":1102,"_extension":18},"/en-us/blog/best-practices-customer-feature-request",{"title":1084,"description":1085,"ogTitle":1084,"ogDescription":1085,"noIndex":6,"ogImage":1086,"ogUrl":1087,"ogSiteName":685,"ogType":686,"canonicalUrls":1087,"schema":1088},"How to incorporate private customer needs into a public product roadmap","We've had lots of experience documenting and tracking private customer feature requests effectively. Here's our best advice and how to get the most out of GitLab issues and issue trackers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/best-practices-customer-feature-request","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to incorporate private customer needs into a public product roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christina Hupy, Ph.D.\"},{\"@type\":\"Person\",\"name\":\"Neil McCorrison\"}],\n        \"datePublished\": \"2021-09-23\",\n      }",{"title":1084,"description":1085,"authors":1090,"heroImage":1086,"date":1093,"body":1094,"category":832,"tags":1095},[1091,1092],"Christina Hupy, Ph.D.","Neil McCorrison","2021-09-23","\n\nEffectively communicating a customer’s private needs to product teams is essential to a product’s success, but it can be a tricky undertaking.\n\nTeams can face several challenges in communicating and tracking customers' 
requests, including protecting customer confidentiality, tracking priority and progress, and making sure the product team is getting actionable feedback that can be incorporated into product milestones.\n\nThis blog post shares GitLab's best practices and lessons learned, as well as a video conversation between GitLab CEO [Sid Sijbrandij](/company/team/#sytses) and Fleet CEO [Mike McNeil](https://www.linkedin.com/in/mikermcneil/).\n\nIn line with GitLab's [open core model](/company/stewardship/) and [transparency value](https://handbook.gitlab.com/handbook/values/#transparency), our product roadmap is public and the product team uses [public issue trackers](/gitlab-com/Product/-/issues) for feature requests and to plan the work. Because the issues are public, customers and community members can see how the product team works, what direction we are headed, and what the priorities are. Contributors can even decide to create a feature themselves.\n\nEver wonder what a DevOps Platform could do for your team? [Here's what you need to know](/solutions/devops-platform/)\n\nWhen a customer indicates a feature request to a technical account manager (TAM), the manager searches for the relevant open feature request in the product teams' issue tracker and adds a comment with generic details about the customer such as number of users and product. If an issue for that feature request does not already exist, the technical account manager can create an issue with the [Feature Proposal](https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20Proposal%20-%20lean) issue template then and add the customer’s request as a comment.\n\nFor example, the comment should include the following:\n\n> Hello `@product-manager`,  an Ultimate customer with 1500 users (`salesforce-link`) would like to see this feature prioritized, ideally within the next 6 months. 
They need this feature in order to X, which is important to them because Y, and they do not currently have a workaround. Additionally, releasing this feature would result in an estimated 250 additional users.\n\nThe TAM includes a link to the account in the customer relationship management system (CRM), in GitLab’s case Salesforce, so the internal teams can view the details. We even have a [feedback template](/handbook/product/how-to-engage/#feedback-template) to ensure the proper details are captured in the comment. The comment is public but the record in the CRM is private.\n\nThe product manager reviews the request and responds. Relevant [labels](/handbook/customer-success/csm/product/#priority-of-feature-requests) are added based on priority. For example, labels include **critical requests**, **high-priority requests**, **low priority requests** or **promised to a customer**. Milestones can be assigned to track timelines and make sure the feature ships on time. The feature tracking issue should be maintained regularly and acts as the single source of truth on the customer needs. These issues can also be reviewed for metrics on previously delivered feature requests.\n\n**Elevating your DevOps skills? Join us at [Commit at KubeCon - Oct. 11!](/events/commit/)**\n\nIn this case, a noisy feature request issue with comments from customers is a good thing. It helps the product manager directly see where the action is and how customers would benefit, and it also helps when prioritizing what feature ships next. Seeing direct input from the customers provides context and also creates developer empathy and connection with the end user. 
Additional team members, including [solution architects](https://handbook.gitlab.com/job-families/sales/solutions-architect/) find it useful to subscribe to these issues, keeping them automatically updated on progress and discussion by the product team.\n\n**Getting the product team involved early on is essential** to the success of this workflow. Another essential element is that the CSMs bring their customers'feedback directly to the issue where the work is being planned and prioritized.\n\n**Contributing to GitLab:**   Once a product manager has triaged an issue and applied the appropriate [Product Development Workflow](/handbook/product-development-flow/) labels, it may be deemed that the feature is ready for the customer or community to help build the feature directly. Our motto is \"Everyone Can Contribute\", and the ~\"Accepting Merge Requests\" label ([handbook](/handbook/engineering/quality/triage-operations/#sts=Accepting%20merge%20requests)) is a great way to identify when a feature is ready for a community contribution. Customers who wish to contribute back to GitLab can ask for a [Merge Request Coach](https://handbook.gitlab.com/job-families/expert/merge-request-coach/) to help guide them through the process to ensure timely review and alignment with our engineering best practices.\n\nGitLab learned early on that creating a separate issue for customer feedback can get complicated and ends up being disjointed from where the product managers are doing their work.\n\nIn summary, best practices for delivering customer feature requests to the product team include:\n\n* Ensure the feedback is directly where the product managers are working and prioritizing features.\n* Provide only generic details on the customer with a link to internal confidential information, but provide as much detail as possible regarding the customer's use case and business need.\n* Share the feature request issue back with the customer. 
If they feel inclined, they can comment and add details. This builds trust between the customer, their account team, and the product team.\n* Labels and milestones are essential for tracking. If something is critical to the customer, make sure the labels and milestones indicate as such.\n* The feature request issue should act as the single source of truth for the customers' needs; aggregating this information elsewhere results in a disconnect between the need and the work.\n\nWatch the full discussion between Sid and Mike:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JH2cFhoUzsI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\nSid discussing GitLab's best practices on tracking customer feedback with Fleet CEO Mike McNeil\n{: .note}\n\n",[9,834,1096,696],"customers",{"slug":1098,"featured":6,"template":699},"best-practices-customer-feature-request","content:en-us:blog:best-practices-customer-feature-request.yml","Best Practices Customer Feature Request","en-us/blog/best-practices-customer-feature-request.yml","en-us/blog/best-practices-customer-feature-request",{"_path":1104,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1105,"content":1111,"config":1117,"_id":1119,"_type":13,"title":1120,"_source":15,"_file":1121,"_stem":1122,"_extension":18},"/en-us/blog/better-devops-with-gitlab-ci-cd",{"title":1106,"description":1107,"ogTitle":1106,"ogDescription":1107,"noIndex":6,"ogImage":1108,"ogUrl":1109,"ogSiteName":685,"ogType":686,"canonicalUrls":1109,"schema":1110},"Unlock better DevOps with GitLab CI/CD","Why a single application helps to eliminate silos and knowledge gaps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670652/Blog/Hero%20Images/dev-to-devops-cover.png","https://about.gitlab.com/blog/better-devops-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"Unlock better DevOps with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-10-18\",\n      }",{"title":1106,"description":1107,"authors":1112,"heroImage":1108,"date":1114,"body":1115,"category":718,"tags":1116},[1113],"Chrissie Buchanan","2019-10-18","\nWe’ve talked about how the [seamless collaboration between Development and IT operations is a beautiful thing](/topics/devops/build-a-devops-team/). When an organization has a healthy DevOps culture, they’re able to meet business objectives and increase delivery speed. DevOps is meant to eliminate silos so everyone can get on the same page, and the tools you use can play a big role in just how successful, or unsuccessful, your DevOps strategy is.\n\n## Complicated tools create silos\n\nOne of the ways that operations can be at a disadvantage is by having to maintain a [complicated plug-in environment](/blog/plugin-instability/). This scenario becomes especially problematic when things go wrong and developers are relying on a specific group to fix the problem. 
While specialization isn’t necessarily a bad thing (devs shouldn’t have to do ops, and vice versa), usually the expertise needed to manage a plugin environment is a specialization within an already specialized group.\n\nJenkins is the most popular example of this kind of complexity, for a few reasons:\n\n*   **Jenkins architecture requires maintaining a large set of build environment systems**: At scale, this requires many dedicated people to manage machines, install and manage build tools (NodeJS, Python, Java, et al.), monitor machines, etc.\n\n*   **Upgrading is a risk (Jenkins or plug-ins)**: There is a good chance that upgrades can cause processes to fail, leading to broken builds or downtime.\n\n*   **Groovy is hard to maintain**: This isn't a widely popular script language, so it is harder to find experts to manage it and it's hard to debug due to a lack of debuggers.\n\n*   **Jenkins does not support any kind of clustering or failover**: The web UI is run on a web container known as Jenkins master, and you can only have one. For a large team of developers needing to use Jenkins all at once, that one instance needs to be very closely monitored with limited permissions.\n\nA large Jenkins plug-in environment creates silos within silos and knowledge gaps that are hard to overcome. What this leads to is a “throw it over the wall” team dynamic: Because the system depends on the expertise of a very limited number of people, developers have to submit code and hope their experts have the skills to manage it.\n\n## Lack of visibility keeps teams in the dark\n\nIn order for [DevOps](/topics/devops/) to thrive there needs to be an understanding of what every team is doing and clarity around processes. Unfortunately, a tool like Jenkins doesn’t necessarily facilitate this. Because users can’t see other users’ commits, they can’t visualize the SDLC as a whole. 
This only isolates teams even further.\n\nTeams that work within this plug-in environment often download the plug-ins they need, which makes it hard for Jenkins admins to standardize across teams. That, in turn, makes it harder for admins to manage the dependencies and maintain plug-ins properly, which can lead to more broken builds.\n\nWhile plug-ins are a common way to add functionality into a toolchain, it doesn’t address the problems of a toolchain that hinder teams trying to implement DevOps:\n\n*   Lack of visibility\n*   Knowledge gaps\n*   Work silos\n\n## Why single application CI/CD makes better DevOps\n\nAs a complete [DevOps platform](/solutions/devops-platform/) delivered as a single application, we provide a tool that covers all parts of the SDLC from one interface. CI and CD are just one part of the lifecycle, and by having functionality like [SCM, Issue tracking, Security testing, and Monitoring](/solutions/jenkins/) built right in, we’re making it easier for teams to work with DevOps best practices.\n\nIf you would like to see a demo of GitLab CI/CD and how we compare to Jenkins, and access other curated content around CI/CD, you can watch our most recent webcast.\n\n[Watch the demo.](/blog/migrating-from-jenkins/)\n{: .alert .alert-gitlab-purple .text-center}\n",[108,9,722],{"slug":1118,"featured":6,"template":699},"better-devops-with-gitlab-ci-cd","content:en-us:blog:better-devops-with-gitlab-ci-cd.yml","Better Devops With Gitlab Ci Cd","en-us/blog/better-devops-with-gitlab-ci-cd.yml","en-us/blog/better-devops-with-gitlab-ci-cd",{"_path":1124,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1125,"content":1131,"config":1138,"_id":1140,"_type":13,"title":1141,"_source":15,"_file":1142,"_stem":1143,"_extension":18},"/en-us/blog/biggest-obstacles-to-getting-work-done",{"title":1126,"description":1127,"ogTitle":1126,"ogDescription":1127,"noIndex":6,"ogImage":1128,"ogUrl":1129,"ogSiteName":685,"ogType":686,"canonicalUrls":1129,"schema":1130},"Why 
deadlines get missed (and how to fix it)","These are the biggest obstacles preventing developers from getting work done – and how to tackle them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671344/Blog/Hero%20Images/obstacles-to-getting-work-done.jpg","https://about.gitlab.com/blog/biggest-obstacles-to-getting-work-done","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why deadlines get missed (and how to fix it)\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-06-27\",\n      }",{"title":1126,"description":1127,"authors":1132,"heroImage":1128,"date":1134,"body":1135,"category":718,"tags":1136},[1133],"Rebecca Dodd","2017-06-27","\nIt's not just unnecessary meetings or outdated tools – developers are more concerned with communication and culture issues preventing them from getting work over the finish line in time. So how do you combat this problem?\n\n\u003C!-- more -->\n\nIn the past, the software development process has followed a linear path, with teams doing their work and then handing off responsibility for a project to whoever takes on the next stage, giving little (if any!) thought to how others will manage when their turn comes. 
This creates a disconnect between the team making the decisions and the team executing them, which can lead to mismanaged expectations and delayed releases.\n\n## What are the biggest obstacles to getting work done?\n\nIn our [Global Developer Survey](https://page.gitlab.com/2016-developer-survey_2016-developer-survey.html), we asked developers what prevents them from meeting deadlines, and the responses were a combination of the obvious: unnecessary meetings (16.25 percent) and being forced to use inappropriate or outdated tools (6.16 percent); as well as more concerning, systemic issues, which point toward the breakdown in communication between developers and product owners.\n\nUnclear direction came in tops with over 47 percent and unrealistic deadlines followed with 21.29 percent. These issues also manifest in [code getting released too early](/blog/why-code-is-released-too-early/). So how do you combat this?\n\n##  Introduce DevOps practices\n\n Adopting elements of the DevOps approach instead, and taking a more collaborative attitude towards setting deadlines and deciding on what will go into the next release means that all participants and stakeholders can weigh in on the decision and flag potential problems or delays.\n\n### Work on smaller releases\n\nWorking more [iteratively](https://handbook.gitlab.com/handbook/values/#iteration) helps to prevent bottlenecks as you approach a deadline, as you're trying to fit less into each release. 
Stripping a new feature or package down to its smallest components (the Minimum Viable Changes) helps to clarify what exactly you're working on and what the expectations are, so the way forward is clearer to everyone involved.\n\nTo find out more about iteration and Minimum Viable Changes, check out [How to Shorten the Conversation Cycle](/blog/how-to-shorten-conversation-cycle/).\n\n### Create cross-functional teams\n\nWhen teams interact throughout the development process, instead of just handing off to each other, you create a culture in which everyone feels responsible for the final outcome rather than just their portion of a project. From the early planning phases, through development, testing and review, involving the right stakeholders and experts at the right times results in better mutual understanding of different teams' unique motivations and pressures. This helps everyone to take these factors into account when deciding on deadlines or what to include in a release, so the direction is clear and the timeline realistic.\n\n### Encourage collaboration\n\nFor those cross-functional teams to be effective, collaboration is critical. If you've been working in siloed teams without much interaction, this can be a challenge to implement. But making everyone feel comfortable to share their ideas and contribute means you get more diverse perspectives on what you're working on and ensures that you take advantage of every team's expertise and factor in any limitations too. 
[Here are three ways we try to foster collaboration at GitLab](/blog/ways-to-encourage-collaboration/).\n\nDownload our [Global Developer Report](https://page.gitlab.com/2016-developer-survey_2016-developer-survey.html) to learn more about what developers want and need to do their jobs more efficiently.\n{: .alert .alert-gitlab-orange}\n\nCover image: “[Brickwall](https://unsplash.com/collections/834185/obstacles?photo=9OEE8Ktcaac)” by [Namrod Gorguis](https://unsplash.com/@namroud)\n{: .note}\n",[1137,9,696],"webcast",{"slug":1139,"featured":6,"template":699},"biggest-obstacles-to-getting-work-done","content:en-us:blog:biggest-obstacles-to-getting-work-done.yml","Biggest Obstacles To Getting Work Done","en-us/blog/biggest-obstacles-to-getting-work-done.yml","en-us/blog/biggest-obstacles-to-getting-work-done",{"_path":1145,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1146,"content":1152,"config":1160,"_id":1162,"_type":13,"title":1163,"_source":15,"_file":1164,"_stem":1165,"_extension":18},"/en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"title":1147,"description":1148,"ogTitle":1147,"ogDescription":1148,"noIndex":6,"ogImage":1149,"ogUrl":1150,"ogSiteName":685,"ogType":686,"canonicalUrls":1150,"schema":1151},"Explore the Dragon Realm: Building a C++ adventure game with AI","How to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663344/Blog/Hero%20Images/compassinfield.jpg","https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Explore the Dragon Realm: Build a C++ adventure game with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        
\"datePublished\": \"2023-08-24\",\n      }",{"title":1153,"description":1148,"authors":1154,"heroImage":1149,"date":1156,"body":1157,"category":764,"tags":1158},"Explore the Dragon Realm: Build a C++ adventure game with a little help from AI",[1155],"Fatima Sarah Khalid","2023-08-24","Learning, for me, has never been about reading a textbook or sitting in on a\nlecture - it's been about experiencing and immersing myself in a hands-on\nchallenge. This is particulary true for new programming languages. With\n[GitLab Duo Code Suggestions](https://about.gitlab.com/gitlab-duo/),\nartificial intelligence (AI) becomes my interactive guide, providing an\nenvironment for trial, error, and growth. In this tutorial, we will build a\ntext-based adventure game in C++ by using Code Suggestions to learn the\nprogramming language along the way.\n\n\nYou can use this table of contents to navigate into each section. It is\nrecommended to read top-down for the best learning experience.\n\n\n- [Setup](#setup)\n  - [Installing VS Code](#installing-vs-code)\n  - [Installing Clang as a compiler](#installing-clang-as-a-compiler)\n  - [Setting up VS Code](#setting-up-vs-code)\n- [Getting started](#getting-started)\n  - [Compiling and running your program](#compiling-and-running-your-program)\n- [Setting the text adventure stage](#setting-the-adventure-stage)\n\n- [Defining the adventure: Variables](#defining-the-adventure-variables)\n\n- [Crafting the adventure: Making decisions with\nconditionals](#crafting-the-adventure-making-decisions-with-conditionals)\n\n- [Structuring the narrative:\nCharacters](#structuring-the-narrative-characters)\n\n- [Structuring the narrative: Items](#structuring-the-narrative-items)\n\n- [Applying what we've learned at the Grand\nLibrary](#applying-what-weve-learned-at-the-grand-library)\n\n- [See you next time in the Dragon\nRealm](#see-you-next-time-in-the-dragon-realm)\n\n- [Share your feedback](#share-your-feedback)\n\n\n> Download [GitLab Ultimate for 
free](https://about.gitlab.com/gitlab-duo/)\nfor a trial of GitLab Duo Code Suggestions.\n\n\n## Setup\n\nYou can follow this tutorial in your [preferred and supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors).\nReview the documentation to enable Code Suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nThese installation instructions are for macOS Ventura on M1 Silicon. \n\n\n### Installing VS Code\n\n\n* Download and install [VS Code](https://code.visualstudio.com/download).\n\n* Alternatively, you can also install it as a Homebrew cask: `brew install\n--cask visual-studio-code`.\n\n\n### Installing Clang as a compiler\n\n\n* On macOS, you'll need to install some developer tools. Open your terminal\nand type:\n\n\n```\n\nxcode-select --install\n\n```\n\n\nThis will prompt you to install Xcode's command line tools, which include\nthe [Clang C++ compiler](https://clang.llvm.org/get_started.html).\n\n\nAfter the installation, you can check if `clang++` is installed by typing:\n\n\n```\n\nclang++ --version\n\n```\n\n\nYou should see an output that includes some information about the Clang\nversion you have installed. \n\n\n### Setting up VS Code\n\n\n* Launch VS Code.\n\n* Install and configure [the GitLab Workflow\nextension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\n* Optionally, in VS Code, install the [C/C++ Intellisense\nextension](https://marketplace.visualstudio.com/items?itemName=ms-vscode.cpptools),\nwhich helps with debugging C/C++. \n\n\n## Getting started\n\nNow, let's start building this magical adventure with C++. 
We'll start with\na \"Hello World\" example.\n\n\nCreate a new project `learn-ai-cpp-adventure`. In the project root, create\n`adventure.cpp`. The first part of every C++ program is the `main()`\nfunction. It's the entry point of the program.\n\n\nWhen you start writing `int main() {`, Code Suggestions will help\nautocomplete the function with some default parameters.\n\n\n![adventure.cpp with a hello world implementation suggested by Code\nSuggestions](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/0-helloworld.png){:\n.shadow}\n\n\n```cpp\n\nint main()\n\n{\n    cout \u003C\u003C \"Hello World\" \u003C\u003C endl;\n    return 0;\n}\n\n```\n\n\nWhile this is a good place to start, we need to add an include and update\nthe output statement:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nThe program prints \"Hello World!\" to the console when executed.\n\n\n* `#include \u003Ciostream>`: Because we are building a text-based adventure, we\nwill rely on input from the player using input and output operations (I/O)\nin C++. This include is a preprocessor directive that tells our program to\ninclude the `iostream` library, which provides facilities to use input and\noutput streams, such as `std::cout` for output.\n\n\n* You might find that Code Suggestions suggests `int main(int argc, char*\nargv[])` as the definition of our main function. The parameters `(int argc,\nchar* argv[])` are used to pass command-line arguments to the program. Code\nSuggestions added them as default parameters, but they are not needed if\nyou're not using command-line arguments. 
In that case, we can also define\nthe main function as `int main()`.\n\n\n* `std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;`: outputs \"Hello World\" to the\nconsole. The stream operator `\u003C\u003C` is used to send the string to output.\n`std::endl` is an end-line character.\n\n\n* `return 0;`: we use `return 0;` to indicate the end of the `main()`\nfunction and return a value of 0. In C++, it is good practice to return 0 to\nindicate the program has completed successfully.\n\n\n### Compiling and running your program\n\nNow that we have some code, let's review how we'll compile and run this\nprogram. \n\n* Open your terminal or use the terminal in VSCode (View -> Terminal).\n\n* Navigate to your project directory.\n\n* Compile your program by typing:\n\n\n```bash\n\nclang++ adventure.cpp -o adventure\n\n```\n\n\nThis command tells the Clang++ compiler to compile adventure.cpp and create\nan executable named adventure. After this, run your program by typing:\n\n\n```\n\n./adventure\n\n```\n\n\nYou should see \"Hello World!\" printed in the terminal. \n\n\nBecause our tutorial uses a single source file `adventure.cpp`, we can use\nthe compiler directly to build our program. In the future, if the program\ngrows beyond a file, we'll set up additional configurations to handle\ncompilation. \n\n\n## Setting the text adventure stage\n\nBefore we get into more code, let's set the stage for our text adventure.\n\n\nFor this text adventure, players will explore the Dragon Realm. The Dragon\nRealm is full of mountains, lakes, and magic. Our player will enter the\nDragon Realm for the first time, explore different locations, meet new\ncharacters, collect magical items, and journal their adventure. At every\nlocation, they will be offered choices to decide the course of their\njourney.\n\n\nTo kick off our adventure into the Dragon Realm, let's update our\n`adventure.cpp main()` function to be more specific. 
As you update the\nwelcome message, you might find that Code Suggestions already knows we're\nbuilding a game.\n\n\n![adventure.cpp - Code Suggestions offers suggestion of welcoming users to\nthe Dragon Realm and knows its a\ngame](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/1-welcome-to-the-realm.png){:\n.shadow}\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n## Defining the adventure: Variables\n\nA variable stores data that can be used throughout the program scope in the\n`main()` function. A variable is defined by a type, which indicates the kind\nof data it can hold.\n\n\nLet's create a variable to hold our player's name and give it the type\n`string`. 
A `string` is designed to hold a sequence of characters so it's\nperfect for storing our player's name.\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nAs you do this, you may notice that Code Suggestions knows what's coming\nnext - prompting the user for their player's name.\n\n\n![adventure.cpp - Code Suggestions suggests welcoming the player with the\nplayerName\nvariable](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2-player-name-variable.png){:\n.shadow}\n\n\nWe may be able to get more complete and specific Code Suggestions by\nproviding comments about what we'd like to do with the name - personally\nwelcome the player to the game. Start by adding our plan of action in\ncomments.\n\n\n```cpp\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n\n    // Display a personalized welcome message to the player with their name\n```\n\n\nTo capture the player's name from input, we need to use the `std::cin`\nobject from the `iostream` library to fetch input from the player using the\nextraction operator `>>`. 
If you start typing `std::` to start prompting the\nuser, Code Suggestions will make some suggestions to help you gather user\ninput and save it to our `playerName` variable.\n\n\n![adventure.cpp - Code Suggestions prompts the user to input their player\nname](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2.1-player-name-input.png){:\n.shadow}\n\n\nNext, to welcome our player personally to the game, we want to use\n`std::cout` and the `playerName` variable together:\n\n\n```cpp\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n```\n\n\n## Crafting the adventure: Making decisions with conditionals\n\nIt's time to introduce our player to the different locations in the Dragon\nRealm they can visit. To prompt our player with choices, we use\nconditionals. Conditionals allow programs to take different actions based on\ncriteria, such as user input.\n\n\nLet's offer the player a selection of locations to visit and capture their\nchoice as an `int` value that corresponds to the location they picked.\n\n\n```cpp\n\n// Display a personalized welcome message to the player with their name\n\nstd::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C\nstd::endl;\n\n\n// Declare an int variable to capture the user's choice\n\nint choice;\n\n```\n\n\nThen, we want to offer the player the different locations that are possible\nfor that choice. 
Let's start with a comment and prompt Code Suggestions with\n`std::cout` to fill out the details for us.\n\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the\nlocations listed in the code\nbelow](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3-setup-location-choice.png){:\n.shadow}\n\n\nAs you accept the suggestions, Code Suggestions will help build out the\noutput and ask the player for their input.\n\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the\nlocations listed in the code below and asks for player\ninput](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.1-capture-player-location-choice.png){:\n.shadow}\n\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n\n    // Offer the player a choice of 3 locations: 1 for Moonlight Markets, 2 for Grand Library, and 3 for Shimmer Lake.\n    std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go?\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"3. Shimmer Lake\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"Please enter your choice: \";\n    std::cin >> choice;\n```\n\n\nOnce you start typing `std::cin >>` or accept the prompt for asking the\nplayer for their choice, Code Suggestions might offer a suggestion for\nbuilding out your conditional flow. 
AI is non-deterministic: One suggestion\ncan involve if/else statements while another solution uses a switch\nstatement.\n\n\nTo give Code Suggestions a nudge, we'll add a comment and start typing out\nan if statement: `if (choice ==)`.\n\n\n![adventure.cpp - Code Suggestions suggests using an if statement to manage\nchoice of\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2-if-statement-locations.png){:\n.shadow}\n\n\nAnd if you keep accepting the subsequent suggestions, Code Suggestions will\nautocomplete the code using if/else statements.\n\n\n![adventure.cpp - Code Suggestions helps the user fill out the rest of the\nif/else statements for choosing a\nlocation](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2.1-if-statement-locations-continued.png){:\n.shadow}\n\n\n```cpp\n    // Check the user's choice and display the corresponding messages\n    if (choice == 1) {\n        std::cout \u003C\u003C \"You chose Moonlight Markets\" \u003C\u003C std::endl;\n    }\n    else if (choice == 2) {\n        std::cout \u003C\u003C \"You chose Grand Library\" \u003C\u003C std::endl;\n    }\n    else if (choice == 3) {\n        std::cout \u003C\u003C \"You chose Shimmer Lake\" \u003C\u003C std::endl;\n    }\n    else {\n        std::cout \u003C\u003C \"Invalid choice\" \u003C\u003C std::endl;\n    }\n```\n\n\n`if/else` is a conditional statement that allows a program to execute code\nbased on whether a condition, in this case the player's choice, is true or\nfalse. 
If the condition evaluates to true, the code inside the braces is\nexecuted.\n\n\n* `if (condition)`: used to check if the condition is true.\n\n* `else if (another condition)`: if the previous condition isn't true, the\nprograms checks this condition.\n\n* `else`: if none of the previous conditions are true.\n\n\nAnother way of managing multiple choices like this example is using a\n`switch()` statement. A `switch` statement allows our program to jump to\ndifferent sections of code based on the value of an expression, which, in\nthis case, is the value of `choice`.\n\n\nWe are going to replace our `if/else` statements with a `switch` statement.\nYou can comment out or delete the `if/else` statements and prompt Code\nSuggestions starting with `switch(choice) {`.\n\n\n![adventure.cpp - Code Suggestions helps the user handle the switch\nstatement for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3-conditional-switch-locations.png){:\n.shadow}\n\n\n![adventure.cpp - Code Suggestions helps the user handle the switch\nstatement for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3.1-conditional-switch-locations-continued.png){:\n.shadow}\n\n\n```cpp\n    // Evaluate the player's decision\n    switch(choice) {\n        // If 'choice' is 1, this block is executed.\n        case 1:\n            std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 2, this block is executed.\n        case 2:\n            std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 3, this block is executed.\n        case 3:\n            std::cout \u003C\u003C \"You chose Shimmer Lake.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is not 1, 2, or 3, this block is 
executed.\n        default:\n            std::cout \u003C\u003C \"You did not enter 1, 2, or 3.\" \u003C\u003C std::endl;\n    }\n```\n\n\nEach case represents a potential value that the variable or expression being\nswitched on (in this case, choice) could have. If a match is found, the code\nfor that case is executed. We use the `default` case to handle any input\nerrors in case the player enters a value that isn't accounted for.\n\n\nLet's build out what happens when our player visits the Shimmering Lake.\nI've added some comments after the player's arrival at Shimmering Lake to\nprompt Code Suggestions to help us build this out:\n\n\n```cpp\n    // If 'choice' is 3, this block is executed.\n    case 3:\n        std::cout \u003C\u003C \"You chose Shimmering Lake.\" \u003C\u003C std::endl;\n        // The player arrives at Shimmering Lake. It is one of the most beautiful lakes the player has ever seen.\n        // The player hears a mysterious melody from the water.\n        // They can either 1. Stay quiet and listen, or 2. Sing along with the melody.\n\n        break;\n```\n\n\nNow, if you start writing `std::cout` to begin offering the player this new\ndecision point, Code Suggestions will help fill out the output code.\n\n\n![adventure.cpp - Code Suggestions helps fill out the output code based on\nthe comments about the interaction at the\nLake](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4-case-3-output.png){:\n.shadow}\n\n\nYou might find that the code provided by Code Suggestions is very\ndeclarative. Once I've accepted the suggestion, I personalize the code as\nneeded. 
For example in this case, including the melody the player heard and\nusing the player's name instead of \"you\":\n\n\n![adventure.cpp - I added the playerName to the output and then prompted\nCode Suggestions to continue the narrative based on the comments\nabove](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.1-customizing-output.png){:\n.shadow}\n\n\nI also wanted Code Suggestions to offer suggestions in a specific format, so\nI added an end line:\n\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break\nthe choices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.2-customizing-output-endline.png){:\n.shadow}\n\n\n![adventure.cpp - I added an endline to prompt Code Suggestions to break the\nchoices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.3-sub-choices-output.png){:\n.shadow}\n\n\nNow, we'd like to offer our player a nested choice in this scenario. Before\nwe can define the new choices, we need a variable to store this nested\nchoice. Let's define a new variable `int nestedChoice` in our `main()`\nfunction, outside of the `switch()` statement we set up. 
You can put it\nafter our definition of the `choice` variable.\n\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n```\n\n\nNext, returning to the `if/else` statement we were working on in `case 3`,\nwe want to prompt the player for their decision and save it in\n`nestedChoice`.\n\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break\nthe choices into end line\noutputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.4-capture-nested-choice.png){:\n.shadow}\n\n\nAs you can see, Code Suggestions wants to go ahead and handle the user's\nchoice using another `switch` statement. I would prefer to use an `if/else`\nstatement to handle this decision point.\n\n\nFirst, let's add some comments to give context:\n\n\n```cpp\n    // Capture the user's nested choice\n    std::cin >> nestedChoice;\n\n    // If the player chooses 1 and remains silent, they hear whispers of the merfolk below, but nothing happens.\n    // If the player chooses 2 and sings along, a merfolk surfaces and gifts them a special blue gem as a token of appreciation for their voice.\n\n    // Evaluate the user's nestedChoice\n```\n\n\nThen, start typing `if (nestedChoice == 1)` and Code Suggestions will start\nto offer suggestions:\n\n\n![adventure.cpp - Code Suggestions starts to build out an if statement to\nhandle the\nnestedChoice](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5-nested-choice-if.png){:\n.shadow}\n\n\nIf you tab to accept them, Code Suggestions will continue to fill out the\nrest of the nested `if/else` statements.\n\n\nSometimes, while you're customizing the suggestions that Code Suggestions\ngives, you may even discover that it would like to make creative\nsuggestions, 
too!\n\n\n![adventure.cpp - Code Suggestions makes a creative suggestion to end the\ninteraction with the merfolk by saying \"You are now free to go\" after you\nreceive the\ngem.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5.2-nested-cs-creative-suggestion.png){:\n.shadow}\n\n\nHere's the code for `case 3` for the player's interaction at Shimmering Lake\nwith the nested decision. I've updated some of the narrative dialogue with the\nplayer's name.\n\n```\n    // Handle the Shimmering Lake scenario.\n    case 3:\n        std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that \" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Sing along with the melody\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n\n        // Capture the user's nested choice\n        std::cin >> nestedChoice;\n\n        // If the player chooses to remain silent\n        if (nestedChoice == 1)\n        {\n            std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n        }\n        // If the player chooses to sing along with the melody\n        else if (nestedChoice == 2)\n        {\n            std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                    \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                    \u003C\u003C std::endl;\n        }\n        break;\n```\n\n\nOur player isn't limited to just exploring Shimmering Lake. 
There's a whole\nrealm to explore and they might want to go back and explore other locations.\n\n\nTo facilitate this, we can use a `while` loop. A loop is a type of\nconditional that allows a specific section of code to be executed multiple\ntimes based on a condition. For the `condition` that allows our `while` loop\nto run multiple times, let's use a `boolean` to initialize the loop\ncondition.\n\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n        // wrap the code for switch(choice)\n    }\n```\n\n\nWe also need to move our location prompt inside the `while` loop so that the\nplayer can visit more than one location during their adventure.\n\n\n![adventure.cpp - CS helps us write a go next prompt for the\nlocations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6-while-loop-go-next.png){:\n.shadow}\n\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n```\n\n\nOur `while` loop will keep running as long as `exploring` is `true`, so we\nneed a way for the player to have the option to exit the game. Let's add a\ncase 4 that allows the player to exit by setting `exploring = false`. This\nwill exit the loop and take the player back to the original choices.\n\n\n```cpp\n    // Option to exit the game\n    case 4:\n        exploring = false;\n        break;\n```\n\n\n**Async exercise**: Give the player the option to exit the game instead of\nexploring a new decision.\n\n\nWe also need to update the error handling for invalid inputs in the `switch`\nstatement. You can decide whether to end the program or use the `continue`\nstatement to start a new loop iteration.\n\n\n```cpp\n        default:\n            std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n            continue; // Errors continue with the next loop iteration\n```\n\n\nUsing I/O and conditionals is at the core of text-based adventure games and\nhelps make these games interactive. 
We can combine user input, display\noutput, and implement our narrative into decision-making logic to create an\nengaging experience.\n\n\nHere's what our `adventure.cpp` looks like now with some comments:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\nHere's what the build output looks like if we run `adventure.cpp` and the\nplayer heads to the Shimmering Lake.\n\n\n![adventure.cpp build output - the player is called sugaroverflow and heads\nto the Shimmering Lake and receives a\ngem](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6.1-full-case-3-output.png){:\n.shadow}\n\n\n## Structuring the narrative: Characters\n\nOur player can now explore the world. 
Soon, our player will also be able to\nmeet people and collect objects. Before we can do that, let's organize the\nthings our player can do by creating some structure for the player\ncharacter.\n\n\nIn C++, a `struct` is used to group different data types. It's helpful in\ncreating a group of items that belong together, such as our player's\nattributes and inventory, into a single unit. `struct` objects are defined\nglobally, which means at the top of the file, before the `main()` function.\n\n\nIf you start typing `struct Player {`, Code Suggestions will help you out\nwith a sample definition of a player struct.\n\n\n![adventure.cpp - Code Suggestions helps with setting up the struct\ndefinition for the\nplayer](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4-player-struct-definition.png){:\n.shadow}\n\n\nAfter accepting this suggestion, you might find that Code Suggestions is\neager to define some functions to make this game more fun, such as hunting\nfor treasure.\n\n\n![adventure.cpp - Code Suggestions provides a suggestion for creating\nfunctions to hunt for\ntreasure.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.1-player-struct-treasure-suggestion.png){:\n.shadow}\n\n\n```cpp\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n};\n\n```\n\n\nGiving the player experience points was not in my original plan for this\ntext adventure game, but Code Suggestions makes an interesting suggestion.\nWe could use `xp` for leveling up or for other game mechanics as our project\ngrows.\n\n\n`struct Player` provides a blueprint for creating a player and details the\nattributes that make up a player. To use our player in our code, we must\ninstantiate, or create, an object of the `Player` struct within our `main()`\nfunction. Objects in C++ are instances of structures that contain\nattributes. In our example, we're working with the `Player` struct, which\nhas attributes like name, health, and xp.\n\n\nAs you're creating a `Player` object, you might find that Code Suggestions\nwants to name the player \"John.\"\n\n\n![adventure.cpp - code suggestions suggests naming the new Player object\nJohn.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.2-player-struct-instance-john.png){:\n.shadow}\n\n\n```cpp\n\nint main() {\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n```\n\n\nInstead of naming our player \"John\" for everyone, we'll use the `Player`\nobject to set the attribute for name. When we want to interact with or\nmanipulate an attribute of an object, we use the dot operator `.`. The dot\noperator allows us to access specific members of the object. 
We can set the\nplayer's name using the dot operator with `player.name`.\n\n\nNote that we need to replace other mentions of `playerName` the variable\nwith `player.name`, which allows us to access the player object's name\ndirectly.\n\n\n* Search for all occurrences of the `playerName` variable, and replace it\nwith `player.name`.\n\n* Comment/Remove the unused `std::string playerName` variable after that.\n\n\nWhat your `adventure.cpp` will look like now:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n};\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout 
\u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n## Structuring the narrative: Items\n\nAn essential part of adventure games is a player's inventory - the\ncollection of items they acquire and use during their journey. For example,\nat Shimmering Lake, the player acquired a blue gem.\n\n\nLet's update our Player `struct` to include an inventory using an array. In\nC++, an `array` is a collection of elements of the same type that can be\nidentified by an index. When creating an array, you need to specify its type\nand size. 
Start by adding `std::string inventory` to the Player `struct`:\n\n\n![adventure.cpp - Code Suggestions shows us how to add an array of strings\nto the player struct to use as the players\ninventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5-add-inventory-player-struct.png){:\n.shadow}\n\n\nYou might find that Code Suggestions wants our player to be able to carry\nsome gold, but we don't need that for now. Let's also add `int\ninventoryCount;` to keep track of the number of items in our player's\ninventory.\n\n\n![adventure.cpp - Code Suggestions shows us how to add an integer for\ninventoryCount to the player\nstruct](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.1-add-inventory-count-player-struct.png){:\n.shadow}\n\n\n```cpp\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;  // The number of items in the player's inventory.\n};\n\n```\n\nIn our Player `struct`, we have defined an array for our inventory that can\nhold the names of 10 items (type:string, size: 10). As the player progresses\nthrough our story, we can assign new items to the inventory array based on\nthe player's actions using the array index.\n\n\nSometimes Code Suggestions gets ahead of me and tries to add more complexity\nto the game by suggesting that we need to create a `struct` for some\nMonsters. 
Maybe later, Code Suggestions!\n\n\n![adventure.cpp - Code Suggestions wants to add a struct for Monsters we can\nbattle](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.2-suggestion-gets-distracted-by-monsters.png\n\n){: .shadow}\n\n\nBack at the Shimmering Lake, the player received a special blue gem from the\nmerfolk. Let's update the code in `case 2` for the Shimmering Lake to add\nthe gem to our player's inventory.\n\n\nYou can start by accessing the player's inventory with `player.inventory`\nand Code Suggestions will help add the gem.\n\n\n![adventure.cpp - Code Suggestions shows us how to add a gem to the player's\ninventory using a post-increment operation and the inventory array from the\nstruct\nobject](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.3-add-gem-to-inventory.png){:\n.shadow}\n\n\n```cpp\n    // If the player chooses to sing along with the melody\n    else if (nestedChoice == 2)\n    {\n        std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                \u003C\u003C std::endl;\n        player.inventory[player.inventoryCount] = \"Blue Gem\";\n        player.inventoryCount++;\n    }\n```\n\n\n* `player.inventory`: accesses the inventory attribute of the player object\n\n* `player.inventoryCount`: accesses the integer that keeps track of how many\nitems are currently in the player's inventory. This also represents the next\navailable index in our inventory array where an item can be stored.\n\n* `player.inventoryCount++`: increments the value of inventoryCount by 1.\nThis is a post-increment operation. 
We are adding “Blue Gem” to the next\navailable slot in the inventory array and incrementing the array for the\nnewly added item.\n\n\nOnce we've added something to our player's inventory, we may also want to be\nable to look at everything in the inventory. We can use a `for` loop to\niterate over the inventory array and display each item.\n\n\nIn C++, a `for` loop allows code to be repeatedly executed a specific number\nof times. It's different from the `while` loop we used earlier because the\n`while` executes its body based on a condition, whereas a `for` loop\niterates over a sequence or range, usually with a known number of times.\n\n\nAfter adding the gem to the player's inventory, let's display all the items\nit has. Try starting a for loop with `for ( ` to display the player's\ninventory and Code Suggestions will help you with the syntax.\n\n\n![adventure.cpp - Code Suggestions demonstrates how to write a for loop to\nloop through the players\ninventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.4-loop-over-players-inventory.png){:\n.shadow}\n\n\n```cpp\n\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n\n// Loop through the player's inventory up to the count of items they have\n\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n\n{\n    // Output the item in the inventory slot\n    std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n}\n\n```\n\n\nA `for` loop consists of 3 main parts:\n\n\n* `int i = 0`: is the initialization where you set up your loop variable.\nHere, we start counting from 0.\n\n* `i \u003C player.inventoryCount`: is the condition we're looping on, our loop\nchecks if `i`, the current loop variable, is less than the number of items\nin our inventory. It will keep going until this is true.\n\n* `i++`: is the iteration. 
This updates the loop variable each time the loop\nruns.\n\n\nTo make sure that our loop doesn't encounter an error, let's add some error\nhandling to make sure the inventory is not empty when we try to output it.\n\n\n```\n\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n\n// Loop through the player's inventory up to the count of items they have\n\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n\n{\n    // Check if the inventory slot is not empty.\n    if (!player.inventory[i].empty())\n    {\n        // Output the item in the inventory slot\n        std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n    }\n}\n\n```\n\n\nWith our progress so far, we've successfully established a persistent\n`while` loop for our adventure, handled decisions, crafted a `struct` for\nour player, and implemented a simple inventory system. Now, let's dive into\nthe next scenario, the Grand Library, applying the foundations we've\nlearned.\n\n\n**Async exercise**: Add more inventory items found in different locations.\n\n\nHere's what we have for `adventure.cpp` so far:\n\n\n```cpp\n\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n\n// Define a structure for a Player in the game.\n\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;\n};\n\n\n// Main function, the starting point of the program\n\nint main()\n\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"--------------------------------------------------------\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                    player.inventory[player.inventoryCount] = \"Blue Gem\";\n                    player.inventoryCount++;\n\n                    std::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n                    // Loop through the player's inventory up to the count of items they have\n                    for (int i = 0; i \u003C player.inventoryCount; i++)\n                    {\n                        // Check if the inventory slot is not empty.\n                        if (!player.inventory[i].empty())\n                        {\n                            // Output the item in the inventory slot\n                            std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n                        }\n                    }\n\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, 
or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n\n```\n\n\n![adventure.cpp - A full output of the game at the current state - our\nplayer sugaroverflow visits the Lake, receives the gem, adds it to their\ninventory, and we display the inventory before returning to the\nloop](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.5-full-output-shimmering-lake.png){:\n.shadow}\n",[495,766,9,1159,1035],"DevSecOps",{"slug":1161,"featured":6,"template":699},"building-a-text-adventure-using-cplusplus-and-code-suggestions","content:en-us:blog:building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","Building A Text Adventure Using Cplusplus And Code Suggestions","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"_path":1167,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1168,"content":1174,"config":1180,"_id":1182,"_type":13,"title":1183,"_source":15,"_file":1184,"_stem":1185,"_extension":18},"/en-us/blog/business-impact-ci-cd",{"title":1169,"description":1170,"ogTitle":1169,"ogDescription":1170,"noIndex":6,"ogImage":1171,"ogUrl":1172,"ogSiteName":685,"ogType":686,"canonicalUrls":1172,"schema":1173},"The business impact of CI/CD","How a good CI/CD strategy generates revenue and keeps developers happy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670016/Blog/Hero%20Images/modernize-cicd.jpg","https://about.gitlab.com/blog/business-impact-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The business impact of 
CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"},{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2019-06-21\",\n      }",{"title":1169,"description":1170,"authors":1175,"heroImage":1171,"date":1177,"body":1178,"category":718,"tags":1179},[1113,1176],"William Chia","2019-06-21","\n\n[Continuous integration and delivery](/solutions/continuous-integration/) helps [DevOps](/topics/devops/) teams ship higher quality software, faster. But is all [CI/CD](/topics/ci-cd/) created equal? What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. Today, we’ll talk about the revenue impact of a poor or non-existent CI/CD strategy.\n\nIf these problems hit a little too close to home, stay tuned for part three where we dive deeper into what organizations gain when they implement better CI/CD.\n\n## What are the business impacts of bad CI/CD?\n\n### 1. A large portion of IT budget is spent on undifferentiated engineering\n\nOpportunity costs play a much larger role in the development process than we realize. Organizations can only afford so many engineers at one time, and systems that require extensive maintenance means fewer engineers are working on revenue-generating projects. This will lead to slower innovation and slower growth in the long term. Undifferentiated engineering means too many individuals are having to focus on one thing – maintenance.\n\n### 2. Delayed (and even unrealized) revenue\n\nThis is the impact of lost opportunity costs. When there are too many dependencies, too many handoffs, and too many manual tasks, it causes delays between when code is written and when the business gets value from that code. 
In worst cases, code is written and the business never gets any value from it at all. Code can sit in limbo waiting for others to manually test it, and by the time it’s finally reviewed it’s already irrelevant. The opportunity cost essentially doubles: Engineers were paid to work on code that never deployed, and the business loses out on revenue the code could have generated.\n\n### 3. Lower developer productivity, lower developer happiness, and less reliable software\n\nDowntime = lost revenue. To avoid that dreaded downtime, developers are spending time working on infrastructure and configuration, and they’re also not spending that time delivering business logic. In both cases, they’re being less productive and working outside of their core competencies. Developer hiring and retention will inevitably suffer. Uptime and resiliency are also affected because people who aren’t domain experts are put in charge of determining infrastructure. It’s a self-fulfilling prophecy.\n\n## What does it look like if a magic wand were to solve it today?\n\n### 1. More engineers are working on the app instead of maintenance\n\nThe organization has the right amount of developers devoted to driving business value and spends more time on innovation instead of undifferentiated heavy lifting. Less of the budget is spent on activities that don't generate revenue.\n\n### 2. Developers see their code in production quickly\n\nInfrastructure and deployment are [fully automated](https://docs.gitlab.com/ee/topics/autodevops/). Everyone loves to see the output of their work, developers especially, and the business gets to see the benefits of this code right away. Deploying smaller chunks of code is less risky when developers can take advantage of test automation, so they have less overhead and coordination with a QA team forced to test manually.\n\n### 3. Developers are focused on solving business problems\n\nCode is written to be environment and cloud agnostic. 
Development teams own the uptime of their own services, but they are fully supported by the ops team. Ops owns the infrastructure, dev owns the service, and both teams can work according to their strengths.\n\nSolving these problems doesn’t require waving a wand or any magic at all. Modernizing your architecture and embracing CI/CD is what other companies are doing to release better software, faster. When organizations implement CI/CD best practices, they get the added benefit of generating more revenue in the long run.\n\nSo what makes “good” CI/CD? We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[722,108,9],{"slug":1181,"featured":6,"template":699},"business-impact-ci-cd","content:en-us:blog:business-impact-ci-cd.yml","Business Impact Ci Cd","en-us/blog/business-impact-ci-cd.yml","en-us/blog/business-impact-ci-cd",{"_path":1187,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1188,"content":1194,"config":1200,"_id":1202,"_type":13,"title":1203,"_source":15,"_file":1204,"_stem":1205,"_extension":18},"/en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"title":1189,"description":1190,"ogTitle":1189,"ogDescription":1190,"noIndex":6,"ogImage":1191,"ogUrl":1192,"ogSiteName":685,"ogType":686,"canonicalUrls":1192,"schema":1193},"Chat about your merge request with GitLab Duo","Learn how to use AI-powered Chat to quickly understand complex merge requests by asking about implementation choices, potential risks, and architectural 
decisions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675536/Blog/Hero%20Images/blog-image-template-1800x945__2_.png","https://about.gitlab.com/blog/chat-about-your-merge-request-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Chat about your merge request with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2024-11-22\",\n      }",{"title":1189,"description":1190,"authors":1195,"heroImage":1191,"date":1197,"body":1198,"category":764,"tags":1199},[1196],"Torsten Linz","2024-11-22","Managing a merge request (MR) is an integral part of collaborative development, involving navigating through code changes, discussions, and dependencies to ensure high-quality outcomes. Whether you’re reviewing someone else’s code or trying to make your own changes clearer, the new [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) capability, available in GitLab Duo Enterprise, can help simplify your workflow. Now, you can have a conversation with GitLab Duo Chat about an MR, directly inside GitLab.\n\n## What GitLab Duo Chat brings to an MR workflow\n\nImagine jumping into a merge request titled \"Add logging to order processing.\" Your goal is to onboard yourself to the MR as quickly as possible and to review it. 
You can use GitLab Duo Chat to onboard yourself faster and understand critical questions to accelerate your review:\n\n* \"Do the logs cover all failure scenarios, or are there any gaps where an issue might not be traceable?\"  \n* “Are there any potential privacy concerns with the logged data?\"  \n* \"Why was logging added at these specific points in the order processing workflow, and how does it help with debugging or monitoring?\"\n\n![MR context example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675670/Blog/Content%20Images/MR_Context_example.png)\n\nThese are the kinds of questions that GitLab Duo Chat is ready to answer – questions that let you quickly understand the intentions behind the changes and uncover any potential risks before diving into the details. Instead of spending a lot of time trying to follow code paths or waiting on the author to reply to your questions, you can start getting answers right away, saving valuable time.\n\n## In-depth conversations about MRs\n\nThe magic of this new chat capability isn’t just in summarizing code – it’s in its ability to support in-depth conversations about the MR at hand. Let's assume the logging MR also includes notifications and refactoring. You can ask specific, insightful questions, such as:\n\n* “What are the potential network failure points introduced by refactoring the payment service into a microservice?”  \n* \"Were there any trade-offs made in terms of consistency or accuracy for better performance?\"  \n* \"How are failures in sending notifications handled? Are retries implemented?\"\n\nInstead of simply telling you what changes have been made, GitLab Duo Chat helps you understand *why* those changes were made, what risks are involved, and how to mitigate them. It lets you dig deep and explore the context behind every line of code, every architectural decision, and every change in behavior within the specific MR you are working on.\n\nAnd it doesn't end with that one answer. 
You can engage in a follow-up conversation to dig deeper or to explore. \n\n## An evolving conversation tool\n\nWe’re really excited about how GitLab Duo Chat is evolving to become a true conversational partner for MR authors and reviewers alike. GitLab Duo Chat is [aware of the MR description, discussions, the code diff, and metadata of a single MR](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of). It’s like having an assistant who is well-versed in your MR and ready to explain any part of it – or even rewrite parts, if that’s what you need.\n\nWith GitLab Duo Chat, onboarding yourself to a complex MR or understanding a change in-depth is faster and more intuitive than ever before.\n\n## We need your feedback\n\nWe’re eager to hear how GitLab Duo Chat works for you. All feedback helps us refine this feature and make it even more useful. Please share your experiences by commenting on our [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/464587). Please include the questions you asked, the response you got, and whether it helped you move forward. Together, we can make GitLab Duo Chat an indispensable tool for every merge request!\n\nFor a deeper dive into how to use GitLab Duo Chat, check out our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples#ask-about-a-specific-merge-request) or watch our introductory video below. 
Start your first conversation today and let us know what you think!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=7W4mHWw2iUOzoTUz\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->  \n\n> Sample this new capability with [a free trial of GitLab Ultimate and GitLab Duo Enterprise](https://gitlab.com/-/trials/new).\n\n## Learn more about GitLab Duo Chat\n\n- [GitLab Duo Chat: Get to know productivity-boosting AI enhancements](https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements/)\n- [GitLab Duo Chat, your at-the-ready AI assistant, is now generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)",[766,495,834,1035,767,9],{"slug":1201,"featured":6,"template":699},"chat-about-your-merge-request-with-gitlab-duo","content:en-us:blog:chat-about-your-merge-request-with-gitlab-duo.yml","Chat About Your Merge Request With Gitlab Duo","en-us/blog/chat-about-your-merge-request-with-gitlab-duo.yml","en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"_path":1207,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1208,"content":1214,"config":1220,"_id":1222,"_type":13,"title":1223,"_source":15,"_file":1224,"_stem":1225,"_extension":18},"/en-us/blog/ciso-secure-next-gen-software",{"title":1209,"description":1210,"ogTitle":1209,"ogDescription":1210,"noIndex":6,"ogImage":1211,"ogUrl":1212,"ogSiteName":685,"ogType":686,"canonicalUrls":1212,"schema":1213},"Securing next generation software","Scale your security efforts by understanding and integrating with the DevOps 
workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673038/Blog/Hero%20Images/ciso-secure-next-gen-software.jpg","https://about.gitlab.com/blog/ciso-secure-next-gen-software","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Securing next generation software\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2020-01-27\",\n      }",{"title":1209,"description":1210,"authors":1215,"heroImage":1211,"date":1217,"body":1218,"category":718,"tags":1219},[1216],"Cindy Blake","2020-01-27","\nNext generation software has changed the way developers work, allowing them to \nproduce code quickly and at scale. This poses new security challenges \nhowever and all too often security is treated as a bolt-on task at the end of the \nprocess. Approaching security in this manner won’t scale to the size and \nvelocity of software development. It’s therefore critical that security \ninnovation finds its way into your development lifecycle. You can be sure \nthat your cyber-adversaries aren’t using hacking methods from 10 years ago – \nso why should you be using security technologies and methods from 10 years ago?\n\nTo tackle these changes, CISOs will need to understand three critical shifts in \nnext-generation software: \n\n1. How software is composed and executed\n1. How software is delivered and managed\n1. 
How software complies with regulatory requirements\n\nIt’s time to think of security as an outcome from an integrated DevSecOps effort.\n\nIn my recent book ([free to download here](/resources/ebook-ciso-secure-software/)) \nI explain these three shifts in depth to help security professionals understand \nnew application-related attack surfaces and areas of risk, how DevOps processes \nand tools affect their security efforts, and how security teams can adapt and \nscale to unite the iterative development and security workflows. \n\n## Secure software in the age of DevOps\n\nSecuring the software development lifecycle has never been easy, \nand efficiency-boosting development changes have created more challenges for \nsecurity teams to face. To be successful, CISOs and their teams need to be \nable to focus on:\n\n* Basic security hygiene\n* Monitoring, detection, and automated response\n* Building on standardization, policy automation, validation, common controls, \nand continuous improvement\n\n## Think it through\n\nAt the end of my book, you’ll find 10 steps to take as you work toward your \nnext generation security program. Here is a quick preview of a few of the steps:\n\n1. Start by assessing where you are, and decide on a path to move forward. \n1. Align metrics to manage risks, not silos. \n1. Go broad, not deep, when testing software. \n1. Apply continuous security scanning to iterative development.\n1. 
Apply Zero Trust principles to your applications and their infrastructure.\n\nCover image by [theverticalstory](https://unsplash.com/@theverticalstory) on [Unsplash](https://unsplash.com/photos/LjkEdYv55bA)\n{: .note}\n",[787,722,9],{"slug":1221,"featured":6,"template":699},"ciso-secure-next-gen-software","content:en-us:blog:ciso-secure-next-gen-software.yml","Ciso Secure Next Gen Software","en-us/blog/ciso-secure-next-gen-software.yml","en-us/blog/ciso-secure-next-gen-software",{"_path":1227,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1228,"content":1234,"config":1242,"_id":1244,"_type":13,"title":1245,"_source":15,"_file":1246,"_stem":1247,"_extension":18},"/en-us/blog/collaborating-on-a-cross-stage-feature",{"title":1229,"description":1230,"ogTitle":1229,"ogDescription":1230,"noIndex":6,"ogImage":1231,"ogUrl":1232,"ogSiteName":685,"ogType":686,"canonicalUrls":1232,"schema":1233},"How we tested a feature that affected (almost) all parts of GitLab","Crowd-sourcing testing across teams","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677856/Blog/Hero%20Images/collaboration.png","https://about.gitlab.com/blog/collaborating-on-a-cross-stage-feature","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we tested a feature that affected (almost) all parts of GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aakriti Gupta\"}],\n        \"datePublished\": \"2021-03-17\",\n      }",{"title":1229,"description":1230,"authors":1235,"heroImage":1231,"date":1237,"body":1238,"category":1239,"tags":1240},[1236],"Aakriti Gupta","2021-03-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn 13.9 Team Geo [released Maintenance Mode](https://about.gitlab.com/releases/2021/02/22/gitlab-13-9-released/#maintenance-mode), which was a large, cross stage and cross team project, a few milestones in the making.\n\nThis feature allows system administrators to put 
GitLab in a read-only mode. All parts of the system are affected and testing such a wide scope was challenging.\n\n## Why was testing this feature hard?\n\nAs we started testing with the QA team, it was clear that no one individual or team could know enough about the entire product to design a comprehensive QA plan. The more we tested, the more features we found to test - it was soon becoming an impossibly long list of tests to write for our small team.\n\nWe needed to prioritize manually testing the most important features, and save working on automated tests for another iteration.\n\nBut, what were the most important things to test?\n\nThis is where we decided to crowd-source testing. [We rolled-out discussion issues](https://gitlab.com/dashboard/issues?scope=all&utf8=%E2%9C%93&state=closed&author_username=aakriti.gupta&search=crowd-sourced+maintenance+mode+testing) to each of the 13 stages and asked them to contribute the three most important features that they own, that we should prioritise testing.\n\nWe used these issues to share knowledge of maintenance mode, and responsibility of its development, testing and documentation.\n\nThe response was overwhelming!\n\nProduct managers and engineers from across the development department contributed to our list of tests and collaboratively reviewed and improved documentation. They proactively asked how their features would behave and in some cases, even started MRs to fix the documentation.\n\nThe conversations helped us hone our plan for future iterations of this feature.\n\n## What we learned\n1\\. **Test iteratively and collaboratively**\n\nGet QA and developer teams working together early, instead of after development is almost done, or worse - after release. GitLab's [Quad planning](https://about.gitlab.com/handbook/engineering/quality/quality-engineering/quad-planning/) process was introduced last year to foster better collaboration between Quality, Development, UX, and Product teams. 
As [Jennie from QA](https://gitlab.com/jennielouie) chalked out a plan for QA together with developers, she found a few edge cases that would have otherwise been discovered too late.\n\n2\\. **Don’t hesitate to ask other teams to contribute**\n\nWhen we rolled out a dozen plus issues to all development teams, we were not sure if we’d get even a few responses, but we were overwhelmed with the interest, response and active participation that came from all the teams.\n\n3\\. **Communicate well**\n\nGive people enough and succinct information. When requesting help from other teams, help them prioritize the request by explaining the why.\n\n4\\. **Documentation as a form of developer communication**\n\nAs we worked through large documentation MRs, I realized the documentation was not only important for system administrators, but for developers of GitLab as well. Developers wanted to know how maintenance mode affected their features.\n\n5\\. **Iterate**\n\nKeep the discussions short-lived and focused on the most important aspects. Do not draw out the conversations too long, and move pending conversations over to follow-up issues.\nAs we learned of new test cases, [Nick from QA](https://gitlab.com/nwestbury) and I created follow-up test issues to resolve together with DRIs.\n\n6\\. 
**The more, the merrier**\n\nWhile the discussions started only with Engineering Managers and Product Managers, they often invited engineers in their conversations and this brought more eyes to the project and helped us answer a lot of unknowns.\n","unfiltered",[696,790,1241,789,9],"remote work",{"slug":1243,"featured":6,"template":699},"collaborating-on-a-cross-stage-feature","content:en-us:blog:collaborating-on-a-cross-stage-feature.yml","Collaborating On A Cross Stage Feature","en-us/blog/collaborating-on-a-cross-stage-feature.yml","en-us/blog/collaborating-on-a-cross-stage-feature",{"_path":1249,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1250,"content":1255,"config":1261,"_id":1263,"_type":13,"title":1264,"_source":15,"_file":1265,"_stem":1266,"_extension":18},"/en-us/blog/conan-c-cpp-package-management-integration",{"title":1251,"description":1252,"ogTitle":1251,"ogDescription":1252,"noIndex":6,"ogImage":803,"ogUrl":1253,"ogSiteName":685,"ogType":686,"canonicalUrls":1253,"schema":1254},"Modern C and C++: How Conan integration works in GitLab","Conan is a leading C and C++ package manager and it is now available in GitLab. Store and share packages easily with your teams or publicly.","https://about.gitlab.com/blog/conan-c-cpp-package-management-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modern C and C++: How Conan integration works in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jordi Mon\"}],\n        \"datePublished\": \"2020-03-31\",\n      }",{"title":1251,"description":1252,"authors":1256,"heroImage":803,"date":1258,"body":1259,"category":832,"tags":1260},[1257],"Jordi Mon","2020-03-31","\n\nAs a single application for all the software development and delivery lifecycle, GitLab strives to support all the different software workflows and pipelines. 
Regardless of how complex this cycle might be (I’m looking at you C++), what we want to do is soothe these pains for C and C++ GitLab users. Following up on this metaphor, as doctors we would like to listen to the patient first: It all started with our community explaining their symptoms and chipping in the first ideas [here](https://gitlab.com/gitlab-org/gitlab-foss/issues/54747). This became even more relevant for GitLab when clients in C++ reliant industries like finance, robotics or embedded software added their interest to supporting package management for C++.\n\n### Conan is now available on GitLab\n\nThe C and C++ ecosystems have a ton of legacy tooling. It is what it is: they’ve been around for a long time and the community is, in a way, very DIY-driven. For example, many C++ libraries are advertised as “Zero deps inside.” This badge is intended as a sign of quality, and is even a bit of a status symbol for the devs and maintainers. That's fine for C/C++ developer but what about the users of such libs? Regardless of the actual quality of the lib’s code, if you wanted to use any of them, you’d better have a local, updated copy of them in a Git submodule. This is especially relevant for head-only monsters like Boost, the most popular set of libs in C++. In other words, in order to make use of them (that’s why they were created in the first place, I guess), you basically have to download the [source code](/solutions/source-code-management/), build it yourself (good luck with that), compile it and include the resulting binary in your project. This process can be time consuming and, if build processes are not well documented or supported, it can be exasperating. All of this can become a real nightmare if transitive dependencies are present, or if different [version control systems](/topics/version-control/) have been used. 
It's also tricky when deciding upon static or dynamic binaries, static or dynamic linking, single or multi-threaded, 32-bit or 64-bit…\n\n### How to build C and C++ packages in GitLab the Conan way\n\nThe GitLab Conan integration allows Conan users to set GitLab as the remote registry for their packages. Users will be able to set the remote and upload and install packages from GitLab’s registry. Think of it this way: you still use the same CLI to work with your Conan packages, but GitLab is on the receiving end. In doing so, GitLab creates the unique opportunity to have the code and package generated from the code living in the same place, freeing users from having to manage multiple services to store packages and code separately and still have them working together. This allows users to share private packages within an organization that is already using GitLab, publish public packages for general or open source use, and will open up many possibilities in utilizing GitLab’s CI pipelines to build and consume these packages automatically.\n\nCheck out a full demo:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/2VVmrKNpC_0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n or a speedrun of Conan performed by the team in charge of the integration:\n\n \u003C!-- blank line -->\n \u003Cfigure class=\"video_container\">\n   \u003Ciframe src=\"https://www.youtube.com/embed/7NYgJWg-w5w\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n \u003C/figure>\n \u003C!-- blank line -->\n\nIf you need more help you can always refer to the [Conan docs](https://docs.conan.io/en/latest/).\n\n### The future of C and C++ in GitLab: Game development workflows!\n\nWhat’s coming next? In tradition with GitLab’s value of iteration, the initial release of Conan is a bare-bones API that allows you to publish and consume packages within GitLab. 
Next up will be a UI that displays much of the commonly referenced metadata for a given package, pre-written CI templates for automatic package publishing and consuming, less strict package naming conventions with remotes scoped to the group and project level within GitLab, and the list goes on.\n\n* [Conan Repository User Interface](https://gitlab.com/gitlab-org/gitlab/issues/33892)\n* [Project and Group level support for Conan Repository](https://gitlab.com/gitlab-org/gitlab/issues/11679)\n\nIf you are interested in package management at large, find a list of publicly available issues about the topic [here](https://gitlab.com/gitlab-org/gitlab/issues?label_name=Package+Repositories). Also, please note that if game development is your interest, large file support, partial clone and many other features that make game development possible with Git, will soon be available in GitLab. All the heavy lifting required for those massive binaries, engines, and animations will feel like feathers when we release those features. 
Stay tuned to know more about it in our newsletter.\n\n",[834,232,721,9],{"slug":1262,"featured":6,"template":699},"conan-c-cpp-package-management-integration","content:en-us:blog:conan-c-cpp-package-management-integration.yml","Conan C Cpp Package Management Integration","en-us/blog/conan-c-cpp-package-management-integration.yml","en-us/blog/conan-c-cpp-package-management-integration",{"_path":1268,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1269,"content":1275,"config":1282,"_id":1284,"_type":13,"title":1285,"_source":15,"_file":1286,"_stem":1287,"_extension":18},"/en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"title":1270,"description":1271,"ogTitle":1270,"ogDescription":1271,"noIndex":6,"ogImage":1272,"ogUrl":1273,"ogSiteName":685,"ogType":686,"canonicalUrls":1273,"schema":1274},"Streamlining Drupal and WordPress with GitLab and Pantheon","Our guest author, a Developer Programs Engineer at Pantheon, shares how to automate WordPress deployments using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680516/Blog/Hero%20Images/gitlab-pantheon.png","https://about.gitlab.com/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Taylor\"}],\n        \"datePublished\": \"2019-03-26\",\n      }",{"title":1276,"description":1271,"authors":1277,"heroImage":1272,"date":1279,"body":1280,"category":832,"tags":1281},"How to connect GitLab and Pantheon to streamline Drupal and WordPress workflows",[1278],"Andrew Taylor","2019-03-26","As a member of the developer relations team at\n[Pantheon](https://pantheon.io), I’m always looking for new ways to help\nWordPress and Drupal developers solve workflow problems 
with automation. To\nthis end, I love exploring new tools and how they can be used effectively\ntogether.\n\n\n### One frequent problem I see teams facing is the dreaded single staging\nserver.\n\n\nIt’s not fun to wait in line for your turn to use the staging server or to\nsend clients a URL and tell them to review some work but ignore other,\nincomplete pieces.\n\n\n[Multidev environments](https://pantheon.io/docs/multidev/), one of\nPantheon’s advanced developer tools, solves this issue by allowing\nenvironments matching Git branches to be created on demand. Each multidev\nenvironment has its own URL and database, making independent work, QA, and\napproval possible without developers stepping on each other's toes.\n\n\nHowever, Pantheon doesn’t provide source control management (SCM) or\ncontinuous integration and continuous deployment (CI/CD) tooling. Instead,\nthe platform is flexible enough to be integrated with your preferred tools.\n\n\n### The next problem I see consistently is teams using different tools to\nmanage development work and to build and deploy that work.\n\n\nFor example, using one tool for SCM and something else for CI/CD. Having to\njump between tools to edit code and diagnose failing jobs is cumbersome.\n\n\n[GitLab](/) solves this problem by providing a full suite of development\nworkflow tools, such as SCM, with features like issues and merge requests,\nbest-in-class CI/CD, and a container registry, to name a few. I haven't come\nacross another application that is so complete to manage development\nworkflow.\n\n\nAs someone who loves automation, I explored connecting Pantheon to GitLab so\nthat commits to the master branch on GitLab deploy to the main dev\nenvironment on Pantheon. 
Additionally, merge requests on GitLab can create\nand deploy code to Pantheon multidev environments.\n\n\nThis tutorial will walk you through setting up the connection between GitLab\nand Pantheon so you, too, can streamline your WordPress and Drupal workflow.\n\n\nThis can be done with [GitLab repository\nmirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html),\nbut we will be setting it up manually to get some experience with [GitLab\nCI](https://docs.gitlab.com/ee/ci/) and have the ability to expand beyond\njust deployment in the future.\n\n\n## Background\n\n\nFor this post, you need to know that Pantheon breaks each site down into\nthree components: code, database, and files.\n\n\nThe code portion of a Pantheon site includes the CMS files, such as\nWordPress core, plugins and themes. These files are managed in a [Git\nrepository](https://git-scm.com/book/en/v2/Git-Basics-Getting-a-Git-Repository)\nhosted by Pantheon, which means we can deploy code from GitLab to Pantheon\nwith Git.\n\n\nWhen Pantheon refers to files, it is the media files, such as images, for\nyour site. These are typically uploaded by site users and are ignored in\nGit.\n\n\nYou can [create a free account](https://pantheon.io/register), learn more\nabout the [Pantheon workflow](https://pantheon.io/docs/pantheon-workflow),\nor [sign up for a live demo](https://pantheon.io/live-demo) on pantheon.io.\n\n\n## Assumptions\n\n\nMy project is named `pantheon-gitlab-blog-demo`, both on Pantheon and\nGitLab. You should use a unique project name. This tutorial uses a WordPress\nsite. 
Drupal can be substituted, but some modification will be needed.\n\n\nI'll also be using the [Git command\nline](https://git-scm.com/book/en/v2/Getting-Started-The-Command-Line) but\nyou can substitute a [graphical\ninterface](https://git-scm.com/book/en/v2/Appendix-A%3A-Git-in-Other-Environments-Graphical-Interfaces)\nif you prefer.\n\n\n## Create the projects\n\n\nFirst up, create a [new GitLab\nproject](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-project)\n– we'll come back to this in a little bit.\n\n\nNow, [create a new WordPress site on\nPantheon](https://pantheon.io/docs/launch-wordpress/). After your new site\nis created, you will need to install WordPress for the site dashboard.\n\n\n_You might be tempted to make some changes, such as adding or removing\nplugins, but please refrain. We haven't connected the site to GitLab yet and\nwant to make sure all code changes, e.g. adding or removing plugins, go\nthrough GitLab._\n\n\nAfter WordPress is installed, go back to the Pantheon site dashboard and\nchange the development mode to Git.\n\n\n![Pantheon\nDashboard](https://about.gitlab.com/images/blogimages/pantheon-dashboard-after-fresh-wordpress-install.png){:\n.shadow.medium.center}\n\n\n## Initial commit to GitLab\n\n\nNext, we need to get the starting WordPress code from the Pantheon site over\nto GitLab. In order to do this, we will clone the code from the Pantheon\nsite Git repository locally, then push it to the GitLab repository.\n\n\nTo make this easier, and more secure, [add an SSH key to\nPantheon](https://pantheon.io/docs/ssh-keys/) to avoid entering your\npassword when cloning Pantheon Git repository. 
While you're at it, [add an\nSSH key to GitLab](https://docs.gitlab.com/ee/ssh/) as well.\n\n\nTo do this, clone the Pantheon site locally by copying the command in the\nClone with Git drop-down field from the site dashboard.\n\n\n![CPantheon git\nconnection](https://about.gitlab.com/images/blogimages/pantheon-git-connection-info.png){:\n.shadow.center}\n\n\n_If you need help, see the [Pantheon Start With\nGit](https://pantheon.io/docs/git/#clone-your-site-codebase) documentation._\n\n\nNext, we want to change the `git remote origin` to point to GitLab, instead\nof Pantheon. This can be done with the [`git remote`\ncommand](https://git-scm.com/docs/git-remote).\n\n\nHead over to your GitLab project and grab the repository URL, which can be\nfound at in the Clone drop-down of the project details screen. Be sure to\nuse the Clone with SSH variant of the GitLab repository URL, since we set up\nan SSH key earlier.\n\n\n![Gitlab git\nconnection](https://about.gitlab.com/images/blogimages/gitlab-git-connection-info.png){:\n.shadow.medium.center}\n\n\nThe default `git remote` for the local copy of our code repository is\n`origin`. We can change it with `git remote set-url origin [GitLab\nrepository URL]`, replacing `[GitLab repository URL]` with your actual\nGitLab repository URL.\n\n\nFinally, run `git push origin master --force` to send the WordPress code\nfrom the Pantheon site to GitLab.\n\n\n_The --force flag is only needed as part of this one-time step. Subsequent\n`git push` commands to GitLab won't need it._\n\n\n## Set up credentials and variables\n\n\nRemember how we added an SSH key locally to authorize with Pantheon and\nGitLab? 
Well, an SSH token can also be used to authorize GitLab and\nPantheon.\n\n\nGitLab has some great documentation, and we will be looking at the [SSH keys\nwhen using the Docker executor section of the Using SSH keys with GitLab\nCI/CD\ndoc](https://docs.gitlab.com/ee/ci/ssh_keys/#ssh-keys-when-using-the-docker-executor).\n\n\nAt this point, we will need to do the first two steps: _Create a new SSH key\npair locally with ssh-keygen and Add the private key as a variable to your\nproject._\n\n\nWhen done, `SSH_PRIVATE_KEY` should be set as a [GitLab CI/CD Environment\nVariables](https://docs.gitlab.com/ee/ci/variables/) in the project\nsettings.\n\n\nTo take care of the third and fourth steps, create `.gitlab-ci.yml` file\nwith the following contents:\n\n\n```\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n  ```\n\nDon't commit the `.gitlab-ci.yml` file just yet, we will be adding more to\nit in the next section.\n\n\nNow, we need to take care of step 5, _add the public key from the one you\ncreated in the first step to the services that you want to have an access to\nfrom within the build environment._\n\n\nIn our case, the service we want to access from GitLab is Pantheon. Follow\nthe Pantheon doc to [Add Your SSH Key to\nPantheon](https://pantheon.io/docs/ssh-keys/#add-your-ssh-key-to-pantheon)\nto complete this step.\n\n\n_Be sure that the private SSH key is in GitLab and the public key is on\nPantheon_\n\n\nWe will also need to set some additional environment variables. The first\none should be named PANTHEON_SITE, and the value will be the machine name of\nyour `Pantheon site`. 
and the value will be the *machine name* of your\nPantheon site.\n\n\nYou can get the machine name from the end of the Clone with Git command.\nSince you already cloned the site locally, it will be the directory name of\nyour local repository.\n\n\n![wordpress machine\nname](https://about.gitlab.com/images/blogimages/pantheon-machine-name.png){:\n.shadow.medium.center}\n\n\nThe next GitLab CI environment variable to set is `PANTHEON_GIT_URL`, which\nwill be the Git repository URL of the Pantheon site that we used earlier.\n\n\n_Enter just the SSH repository URL, leaving off `git clone` and the site\nmachine name at the end._\n\n\nPhew! Now that setup is done, we can move on to finishing our\n`.gitlab-ci.yml` file.\n\n\n## Create the deployment job\n\n\nWhat we will be doing with GitLab CI initially is very similar to what we\ndid with Git repositories earlier. This time though, we will add the\nPantheon repository as a second Git remote and then push the code from\nGitLab to Pantheon.\n\n\nTo do this, we will set up a\n[stage](https://docs.gitlab.com/ee/ci/yaml/#stages) named `deploy` and a\n[job](https://docs.gitlab.com/ee/ci/jobs/) named `deploy:dev`, as it will\ndeploy to the dev environment on Pantheon. 
The resulting `.gitlab-ci.yml`\nfile should look like this:\n\n\n```\n\nstages:\n\n- deploy\n\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n```\n\n\n`SSH_PRIVATE_KEY`, `PANTHEON_SITE`, and `PANTHEON_GIT_URL` should all look\nfamiliar - they are the environment variables we set up earlier. Having\nenvironment variables will allow us to re-use the values multiple times in\nour `.gitlab-ci.yml` file, while having one place to update them, should\nthey change in the future.\n\n\nFinally, add, commit, and push the `.gitlab-ci.yml` file to send it to\nGitLab.\n\n\n## Verify the deployment\n\n\nIf everything was done correctly, the `deploy:dev` job run on GitLab CI/CD,\nsucceed and send the `.gitlab-ci.yml` commit to Pantheon. 
Let's take a look!\n\n\n![deploy\njob](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job.png){:\n.shadow.center}\n\n\n![deploy job\npassing](https://about.gitlab.com/images/blogimages/gitlab-deploy-dev-job-passed.png){:\n.shadow.center}\n\n\n![gitlab commit on pantheon\ndev](https://about.gitlab.com/images/blogimages/gitlab-commits-on-pantheon-dev.png){:\n.shadow.center}\n\n\n## Sending merge request branches to Pantheon\n\n\nThis next section makes use of my favorite Pantheon feature,\n[multidev](https://pantheon.io/docs/multidev), which allows you to create\nadditional Pantheon environments on demand associated with Git branches.\n\n\nThis section is entirely optional as [multidev access is\nrestricted](https://pantheon.io/docs/multidev-faq/), however, if you do have\nmultidev access, having GitLab merge requests automatically create multidev\nenvironments on Pantheon is a huge workflow improvement.\n\n\nWe will start by making a new Git branch locally with `git checkout -b\nmultidev-support`. Now, let's edit `.gitlab-ci.yml` again.\n\n\nI like to use the merge request number in the Pantheon environment name. For\nexample, the first merge request would be `mr-1`, the second would be\n`mr-2`, and so on.\n\n\nSince the merge request changes, we need to define these Pantheon branch\nnames dynamically. 
GitLab makes this easy by providing [predefined\nenvironment](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html)\nvariables.\n\n\nWe can use `$CI_MERGE_REQUEST_IID`, which provides the merge request number.\nLet's put that to use, along with our global environment variables from\nearlier, and add a new deploy:multidev job to the end of our\n`.gitlab-ci.yml` file.\n\n\n```\n\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Checkout the merge request source branch\n    - git checkout $CI_COMMIT_REF_NAME\n    # Add the Pantheon git repository as an additional remote\n    - git remote add pantheon $PANTHEON_GIT_URL\n    # Push the merge request source branch to Pantheon\n    - git push pantheon $CI_COMMIT_REF_NAME:mr-$CI_MERGE_REQUEST_IID --force\n  only:\n    - merge_requests\n```\n\n\nThis should look very similar to our `deploy:dev` job, only pushing a branch\nto Pantheon instead of `master`.\n\n\nAfter you add and commit the updated `.gitlab-ci.yml` file, push this new\nbranch to GitLab with `git push -u origin multidev-support`.\n\n\nNext, let's create a new merge request from our `multidev-support` branch by\nfollowing the _Create merge request_ prompt.\n\n\n![create merge\nrequest](https://about.gitlab.com/images/blogimages/gitlab-create-merge-request-prompt.png){:\n.shadow.medium.center}\n\n\nAfter creating the merge request, look for the  CI/CD job `deploy:multidev`\nto run.\n\n\n![multidev deploy\nsuccess](https://about.gitlab.com/images/blogimages/multidev-branch-deploy-success.png){:\n.shadow.medium.center}\n\n\nLook at that – a new branch was sent to Pantheon. 
However, when we go to the\nmultidev section of the site dashboard on Pantheon there isn't a new\nmultidev environment.\n\n\n![multidev\nbranch](https://about.gitlab.com/images/blogimages/pantheon-no-multidev-environments.png){:\n.shadow.medium.center}\n\n\nLet's look at the _Git_ Branches section.\n\n\n![mr\nbranch](https://about.gitlab.com/images/blogimages/pantheon-mr-1-branch.png){:\n.shadow.medium.center}\n\n\nOur `mr-1` branch did make it to Pantheon after all. Go ahead and create an\nenvironment from the `mr-1` branch.\n\n\n![create\nmultidev](https://about.gitlab.com/images/blogimages/pantheon-mr-1-multidev-creation.png){:\n.shadow.medium.center}\n\n\nOnce the multidev environment has been created, head back to GitLab and look\nat the _Operations > Environments_ section. You will notice entries for\n`dev` and `mr-1`.\n\n\nThis is because we added an `environment` entry with `name` and `url` to our\nCI/CD jobs. If you click on the open environment icon, you will be taken to\nthe URL for the multidev on Pantheon.\n\n\n## Automating multidev creation\n\n\nWe _could_ stop here and try to remember to create a multidev environment\neach time there is a new merge request, but we can automate that process as\nwell!\n\n\nPantheon has a command line tool,\n[Terminus](https://pantheon.io/docs/terminus/), that allows you to interact\nwith the platform in an automated fashion. 
Terminus will allow us to\nprovision our multidev environments from the command line – perfect for use\nin [GitLab CI](https://docs.gitlab.com/ee/ci/).\n\n\nWe will need a new merge request to test this, so let's create a new branch\nwith `git checkout -b auto-multidev-creation`.\n\n\nIn order to use Terminus in GitLab CI/CD jobs we will need a machine token\nto authenticate with Terminus and a container image with Terminus available.\n\n\n[Create a Pantheon machine\ntoken](https://pantheon.io/docs/machine-tokens/#create-a-machine-token),\nsave it to a safe place, and add it as a global GitLab environment variable\nnamed `PANTHEON_MACHINE_TOKEN`.\n\n\n_If you don't remember how to add GitLab environment variables, scroll up to\nwhere we defined `PANTHEON_SITE` earlier in the tutorial._\n\n\n## Building a Dockerfile with Terminus\n\n\nIf you don't have Docker or aren't comfortable working with `Dockerfile`\nfiles, you can use my image\n`registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest` and skip\nthis section.\n\n\n[GitLab has a container\nregistry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html)\nthat allows us to build and host a Dockerfile for use in our project. Let's\ncreate a Dockerfile that has Terminus available, so we can interact with\nPantheon.\n\n\nTerminus is a PHP-based command line tool, so we will start with a PHP\nimage. I prefer to install Terminus via Composer so I'll be using [the\nofficial Docker Composer image](https://hub.docker.com/_/composer) as a\nbase. 
Create a `Dockerfile` in your local repository directory with the\nfollowing contents:\n\n\n```\n\n# Use the official Composer image as a parent image\n\nFROM composer:1.8\n\n\n# Update/upgrade apk\n\nRUN apk update\n\nRUN apk upgrade\n\n\n# Make the Terminus directory\n\nRUN mkdir -p /usr/local/share/terminus\n\n\n# Install Terminus 2.x with Composer\n\nRUN /usr/bin/env COMPOSER_BIN_DIR=/usr/local/bin composer -n\n--working-dir=/usr/local/share/terminus require\npantheon-systems/terminus:\"^2\"\n\n```\n\nFollow the _Build and push images_ section of the [container registry\ndocumentation](https://gitlab.com/help/user/project/container_registry#build-and-push-images)\nto build an image from the `Dockerfile` and upload it to GitLab.\n\n\nVisit the _Registry_ section of your GitLab project. If things went\naccording to plan you will see your image listed. Make a note of the image\ntag link, as we will need to use that in our `.gitlab-ci.yml` file.\n\n\n![container\nregistry](https://about.gitlab.com/images/blogimages/gitlab-container-registry.png){:\n.shadow.center}\n\n\nThe `script` section of our `deploy:multidev` job is starting to get long,\nso let's move it to a dedicated file. 
Create a new file\n`private/multidev-deploy.sh` with the following contents:\n\n\n```\n\n#!/bin/bash\n\n\n# Store the mr- environment name\n\nexport PANTHEON_ENV=mr-$CI_MERGE_REQUEST_IID\n\n\n# Authenticate with Terminus\n\nterminus auth:login --machine-token=$PANTHEON_MACHINE_TOKEN\n\n\n# Checkout the merge request source branch\n\ngit checkout $CI_COMMIT_REF_NAME\n\n\n# Add the Pantheon Git repository as an additional remote\n\ngit remote add pantheon $PANTHEON_GIT_URL\n\n\n# Push the merge request source branch to Pantheon\n\ngit push pantheon $CI_COMMIT_REF_NAME:$PANTHEON_ENV --force\n\n\n# Create a function for determining if a multidev exists\n\nTERMINUS_DOES_MULTIDEV_EXIST()\n\n{\n    # Stash a list of Pantheon multidev environments\n    PANTHEON_MULTIDEV_LIST=\"$(terminus multidev:list ${PANTHEON_SITE} --format=list --field=id)\"\n\n    while read -r multiDev; do\n        if [[ \"${multiDev}\" == \"$1\" ]]\n        then\n            return 0;\n        fi\n    done \u003C\u003C\u003C \"$PANTHEON_MULTIDEV_LIST\"\n\n    return 1;\n}\n\n\n# If the mutltidev doesn't exist\n\nif ! TERMINUS_DOES_MULTIDEV_EXIST $PANTHEON_ENV\n\nthen\n    # Create it with Terminus\n    echo \"No multidev for $PANTHEON_ENV found, creating one...\"\n    terminus multidev:create $PANTHEON_SITE.dev $PANTHEON_ENV\nelse\n    echo \"The multidev $PANTHEON_ENV already exists, skipping creating it...\"\nfi\n\n```\n\n\nThe script is in the `private` directory as [it is not web accessible on\nPantheon](https://pantheon.io/docs/private-paths/). 
Now that we have a\nscript for our multidev logic, update the `deploy:multidev` section of\n`.gitlab-ci.yml` so that it looks like this:\n\n\n```\n\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\n\nIn order to make sure our jobs run with the custom image created earlier,\nadd an `image` definition with the registry URL to `.gitlab-ci.yml`. My\ncomplete `.gitlab-ci.yml` file now looks like this:\n\n\n```\n\nimage: registry.gitlab.com/ataylorme/pantheon-gitlab-blog-demo:latest\n\n\nstages:\n\n- deploy\n\n\nbefore_script:\n  # See https://docs.gitlab.com/ee/ci/ssh_keys/\n  - eval $(ssh-agent -s)\n  - echo \"$SSH_PRIVATE_KEY\" | tr -d '\\r' | ssh-add - > /dev/null\n  - mkdir -p $HOME/.ssh && echo \"StrictHostKeyChecking no\" >> \"$HOME/.ssh/config\"\n  - git config --global user.email \"$GITLAB_USER_EMAIL\"\n  - git config --global user.name \"Gitlab CI\"\n\ndeploy:dev:\n  stage: deploy\n  environment:\n    name: dev\n    url: https://dev-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    - git remote add pantheon $PANTHEON_GIT_URL\n    - git push pantheon master --force\n  only:\n    - master\n\ndeploy:multidev:\n  stage: deploy\n  environment:\n    name: multidev/mr-$CI_MERGE_REQUEST_IID\n    url: https://mr-$CI_MERGE_REQUEST_IID-$PANTHEON_SITE.pantheonsite.io/\n  script:\n    # Run the multidev deploy script\n    - \"/bin/bash ./private/multidev-deploy.sh\"\n  only:\n    - merge_requests\n```\n\n\nAdd, commit, and push `private/multidev-deploy.sh` and `.gitlab-ci.yml`.\nNow, head back to GitLab and wait for the CI/CD job to finish. The multidev\ncreation takes a few minutes, so be patient.\n\n\nWhen it is finished, go check out the multidev list on Pantheon. Voila! 
The\n`mr-2` multidev is there.\n\n\n![mr-2](https://about.gitlab.com/images/blogimages/pantheon-mr-2-multidev.png){:\n.shadow.medium.center}\n\n\n## Conclusion\n\n\nOpening a merge request and having an environment spin up automatically is a\npowerful addition to any team's workflow.\n\n\nBy leveraging the powerful tools offered by both GitLab and Pantheon, we can\nconnect GitLab to Pantheon in an automated fashion.\n\n\nSince we used GitLab CI/CD, there is room for growth in our workflow as\nwell. Here are a few ideas to get you started:\n\n* Add a build step.\n\n* Add automated testing.\n\n* Add a job to enforce coding standards.\n\n* Add [dynamic application security\ntesting](https://docs.gitlab.com/ee/user/application_security/dast/).\n\n\nDrop me a line with any thoughts you have on GitLab, Pantheon, and\nautomation.\n\n\nP.S. Did you know Terminus, Pantheon’s command line tool, [is extendable via\nplugins](https://pantheon.io/docs/terminus/plugins/)?\n\n\nOver at Pantheon, we have been hard at work on version 2 of our [Terminus\nBuild Tools\nPlugin](https://github.com/pantheon-systems/terminus-build-tools-plugin/),\ncomplete with GitLab support. If you don't want to do all this setup for\neach project, I encourage you to check it out and help us test the v2 beta.\nThe terminus `build:project:create` command just needs a Pantheon token and\nGitLab token. 
From there, it will spin up one of our example projects,\ncomplete with Composer and automated testing, create a new project on\nGitLab, a new site on Pantheon, and connect the two by setting up\nenvironment variables and SSH keys.\n\n\n### About the guest author\n\n\nAndrew Taylor is a Developer Programs Engineer at\n[Pantheon](https://pantheon.io/).\n",[722,232,268,9],{"slug":1283,"featured":6,"template":699},"connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows","content:en-us:blog:connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","Connecting Gitlab And Pantheon Streamline Wordpress Drupal Workflows","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows.yml","en-us/blog/connecting-gitlab-and-pantheon-streamline-wordpress-drupal-workflows",{"_path":1289,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1290,"content":1296,"config":1302,"_id":1304,"_type":13,"title":1305,"_source":15,"_file":1306,"_stem":1307,"_extension":18},"/en-us/blog/create-vision",{"title":1291,"description":1292,"ogTitle":1291,"ogDescription":1292,"noIndex":6,"ogImage":1293,"ogUrl":1294,"ogSiteName":685,"ogType":686,"canonicalUrls":1294,"schema":1295},"GitLab's 2019 product vision for DevOps Create","Take an early look at where collaboration, merge requests, and the Web IDE are heading in 2019.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/create-vision","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 2019 product vision for DevOps Create\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Ramsay\"}],\n        \"datePublished\": \"2018-09-21\",\n      }",{"title":1291,"description":1292,"authors":1297,"heroImage":1293,"date":1299,"body":1300,"category":300,"tags":1301},[1298],"James Ramsay","2018-09-21","\nGitLab is a single 
application, so for convenience we organize by [DevOps stages](https://handbook.gitlab.com/handbook/product/categories/). The Create stage of the DevOps lifecycle is about creating code, and includes Git repositories, merge requests, code review, the Web IDE, wikis, and snippets.\n\nManaging source code is at the heart of GitLab – it's in our name and it powers your applications. This year we've shipped many important improvements to make it easier to go from idea to production. The [Web IDE](/releases/2018/06/22/gitlab-11-0-released/#cicd-pipeline-status-and-job-traces-in-the-web-ide) makes it easy for anyone to contribute, and faster to work with merge requests. [Squash and Merge](/releases/2018/06/22/gitlab-11-0-released/#squash-and-merge-in-gitlab-core-and-gitlabcom-free), and [Rebase and Fast-forward Merge](/releases/2018/01/22/gitlab-10-4-released/#rebase-and-fast-forward-in-ce) are available in GitLab CE. [File locking](/releases/2018/02/22/gitlab-10-5-released/#git-lfs-2-locking-support) is integrated with Git LFS. [Maintainers can push to forks](/releases/2018/03/22/gitlab-10-6-released/#maintainers-can-push-to-mr-from-fork). And there is much more to come this year, like [batch comments](https://gitlab.com/gitlab-org/gitlab-ee/issues/1984) for merge requests, and [suggested approvers](https://gitlab.com/gitlab-org/gitlab-ee/issues/5382) based on code owners.\n\nHere are some of the things we're thinking about for 2019:\n\n- [Collaboration](#collaboration)\n- [Code review and approvals](#code-review-and-approvals)\n- [Web IDE](#web-ide)\n- [Summary](#summing-up)\n\nAs our plans are always in draft, we'd love to hear your thoughts, and any suggestions.\n\n### Collaboration\n\nGit's distributed design made new collaborative workflows possible, and forking has made collaboration even easier. Forking is the workflow of choice for open source, and for the same reasons it is also great for private organizations. 
We want to remove the barriers to collaboration and [inner sourcing](/topics/version-control/what-is-innersource/), but also make it easier to collaborate with external open source projects too.\n\nThe distributed capabilities of Git aren't limited to a single server. Open source software is used extensively in commercial applications of all kinds, but collaboration between open source projects and commercial is difficult. Features and bug fixes to open source projects can sit in stale forks in private Git repositories for lack of tools and process. [Distributed merge requests](https://gitlab.com/groups/gitlab-org/-/epics/260) will make it easy publish a patch from a private GitLab instance to a public upstream server, be it GitLab, GitHub or Bitbucket. Teams will be able to work on a patch privately following internal processes, but instead of merging the reviewed and tested change privately, it can be published to a new public merge request upstream. Contributing fixes and features upstream isn't only good for the community, but it also makes commercial sense by eliminating the costly task of keeping a stale, private fork up to date. We want to make it easy for everyone to contribute to open source software, as individuals and as companies!\n\n![Mockup of distributed merge request widget](https://about.gitlab.com/images/blogimages/merge-request-distributed.png){: .medium.center.shadow}\n\nWe'll also be improving simpler forking workflows too with important quality-of-life improvements. To make it easy to see how far behind or diverged your fork is, we will make it possible to [compare branches](https://gitlab.com/gitlab-org/gitlab-ce/issues/19788) across forks and [cherry pick](https://gitlab.com/gitlab-org/gitlab-ce/issues/43568) changes directly from the upstream project into your fork. 
Forks of private projects will also [inherit permissions](https://gitlab.com/gitlab-org/gitlab-ce/issues/8935) from the upstream project, making it possible for upstream maintainers to rebase stale merge requests and help contributors. This will allow teams to adopt forking workflows without needing to make every project public to the world or to the organization.\n\n### Code review and approvals\n\nMerge requests are key to the workflows that allow teams to iterate rapidly and ship amazing products quickly, by bringing together all the important information in a single place. Critical to this workflow is the code review, and we want GitLab to be the best tool for doing code reviews.\n\nAutomatic code quality and linting tools can prevent code reviews becoming simple code style reviews, but without the inline feedback a reviewer can't be sure which problems have been automatically detected. A new [API for line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) will allow output from tools to be rendered natively in GitLab in the merge request diff. Merge request authors will have a single source of truth, and code reviewers can confidently focus on important structural feedback.\n\nCode review feedback cannot truly be resolved and the merge request approved until the reviewer checks the feedback was correctly addressed. This step prevents feedback from being misunderstood or overlooked, but it is currently difficult and time consuming. We are going to streamline this important step by allowing you to [review changes since code review](https://gitlab.com/groups/gitlab-org/-/epics/314) and making [merge request diffs smarter](https://gitlab.com/groups/gitlab-org/-/epics/340). 
When the change is straightforward, we're going to make it possible to simply [propose a change](https://gitlab.com/gitlab-org/gitlab-ce/issues/18008) as easily as leaving a comment that can be applied with a single click – no more copying and pasting `sed` one liners! And we're going to make it easier to [view and add comments to commits](https://gitlab.com/gitlab-org/gitlab-ee/issues/1769) at any time.\n\nIn the real world, complex features often require large, complex merge requests. We will support these situations better with [commit by commit code review](https://gitlab.com/groups/gitlab-org/-/epics/285), autosquashing [`fixup!`](https://gitlab.com/gitlab-org/gitlab-ee/issues/212) and [`squash!`](https://gitlab.com/gitlab-org/gitlab-ce/issues/50400) commits, and allowing you to [preview](https://gitlab.com/gitlab-org/gitlab-ee/issues/7259) the resultant squashed commits.\n\nComplex real-world changes also need good commit messages, but commit messages are too easily neglected. Without good commit messages, debugging a regression, or modifying an important existing function is painful and error prone. To help teams adopt best practice [commit hygiene](/blog/keeping-git-commit-history-clean/), we will make [commit messages part of code review](https://gitlab.com/groups/gitlab-org/-/epics/286) by allowing comments on commit messages, improving the [visibility of commit messages](https://gitlab.com/gitlab-org/gitlab-ce/issues/49803), and making [squash and merge smarter](https://gitlab.com/gitlab-org/gitlab-ce/issues/47149). 
GitLab should celebrate great commit messages and amplify their benefits to make it easier for teams to adopt best practices.\n\n### Web IDE\n\nIn 2018 we're building a strong foundation for a cloud development environment with [client side evaluation](https://gitlab.com/gitlab-org/gitlab-ce/issues/47268) and [server side evaluation](https://gitlab.com/gitlab-org/gitlab-ee/issues/4013) powered live previews, and server side evaluation will also enable a [web terminal](https://gitlab.com/gitlab-org/gitlab-ee/issues/5426) to test your changes in real time. IDEs are also very personal and should support customization, to make it easy to move between your local IDE and GitLab IDE. Please share your feedback, and consider contributing – I'd love to see support for [dark syntax themes](https://gitlab.com/gitlab-org/gitlab-ce/issues/46334) and [vim keybindings](https://gitlab.com/gitlab-org/gitlab-ce/issues/47930)!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/sSWu6TyubTE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Web IDE makes it easier than ever to resolve code review feedback, reducing the need to switch context in your local development environment, but we can make it even better. Addressing a comprehensive code review still requires switching backwards and forwards between the merge request and the Web IDE. 
[Line by line code quality feedback](https://gitlab.com/gitlab-org/gitlab-ce/issues/50299) available in the merge request diff will also be available in the Web IDE as will [live linting feedback](https://gitlab.com/groups/gitlab-org/-/epics/70) powered by server side evaluation so to help prevent new code styling problems being created while resolving feedback.\n\nWe are also considering integrating [merge request discussions](https://gitlab.com/groups/gitlab-org/-/epics/72) so that code review comments can be addressed without needing to continually switch between tabs. We don't think the Web IDE should replace the merge request, nor should every feature be duplicated into it, but do think the Web IDE can further simplify the process for resolving code review feedback so teams can iterate faster.\n\n### Summing up\n\nWriting, reviewing, and merging code is where the rubber hits the road when taking your app from idea to production, and in 2019 we want it to be better than ever before!\n\nThe [GitLab product vision](/direction/) is public so you can read up on what we're thinking about at any time, about every part of the product. Please join the conversation and share your feedback on these ideas, and offer ideas of your own! 
Your contributions – idea or code – are welcomed and appreciated so that we can all work together to make GitLab the best application to build and ship your next great idea.\n",[790,834,696,9,722],{"slug":1303,"featured":6,"template":699},"create-vision","content:en-us:blog:create-vision.yml","Create Vision","en-us/blog/create-vision.yml","en-us/blog/create-vision",{"_path":1309,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1310,"content":1316,"config":1324,"_id":1326,"_type":13,"title":1327,"_source":15,"_file":1328,"_stem":1329,"_extension":18},"/en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide",{"title":1311,"description":1312,"ogTitle":1311,"ogDescription":1312,"noIndex":6,"ogImage":1313,"ogUrl":1314,"ogSiteName":685,"ogType":686,"canonicalUrls":1314,"schema":1315},"How we created a dark UI for GitLab's Web IDE","The Web IDE now has a Dark Mode, and we've put together a few learnings from a design perspective.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669611/Blog/Hero%20Images/ide-dark-post-banner.png","https://about.gitlab.com/blog/creating-a-dark-ui-for-gitlabs-web-ide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we created a dark UI for GitLab's Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marcel van Remmerden\"},{\"@type\":\"Person\",\"name\":\"Jeremy Elder\"}],\n        \"datePublished\": \"2020-05-20\",\n      }",{"title":1311,"description":1312,"authors":1317,"heroImage":1313,"date":1320,"body":1321,"category":832,"tags":1322},[1318,1319],"Marcel van Remmerden","Jeremy Elder","2020-05-20","\n\nOne of the most popular and exciting feature requests we often hear about from our amazing community is a [dark mode for the entire GitLab UI](https://gitlab.com/gitlab-org/gitlab/-/issues/14531). 
It's currently the second most upvoted issue for all of GitLab.\n\nNext to being very popular in the design and development world, a dark mode can be incredibly helpful for users with vision impairments. One of our community members posted this comment, that demonstrates very well how valuable it can be to give users the chance to choose between a light and a dark mode:\n\n> It really comes down to website accessibility. I am legally blind and part of my eye condition is something called photophobia (which is poorly named—it's not a \"fear\" of light, it's that direct bright lights, especially sudden direct bright lights, are like having an ice pick shoved into my eyeballs.)\n\nAt GitLab, we believe in small changes and fast iterations. When our Design team was thinking about how we could split this up and tackle it in small steps, we looked for isolated pieces of our UI that we could create a dark mode for, and the feature that stood out was the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/#web-ide).\n\n## What is the Web IDE?\n\nThe Web IDE (Integrated Development Environment) is a code editor in the browser that allows you to change multiple files at once. Afterwards, you can commit their changes to a branch and create merge requests to discuss those changes and eventually merge them.\n\n![GitLab Web IDE](https://about.gitlab.com/images/blogimages/ide-dark-light-mode-browser.png){: .center}\nThe GitLab Web IDE\n{: .note.text-center}\n\nUsers of the Web IDE find it to be helpful for quickly making small changes or easily viewing their files in a familiar context, similar to their appearance in a local editor.\n\n### Syntax highlighting\n\nAfter deciding the Web IDE would be the first feature of the GitLab user interface (UI) to get a dark mode, we faced one fundamental question: How would the dark mode align with syntax highlighting themes already within GitLab? 
There are several themes that users may choose to display their repository files, snippets, or other code elements in their preferred way.\n\n![User syntax settings](https://about.gitlab.com/images/blogimages/ide-dark-syntax.png){: .center}\nUser syntax highlighting theme settings\n{: .note.text-center}\n\nThe Web IDE exists as a tool within the larger context of GitLab. Similarly, the syntax themes exist within the context of the Web IDE. Our goal was to avoid scenarios where the code area that follows the syntax highlighting theme wouldn't be aligned with the rest of the UI, which could be jarring.\n\nWe made the decision to keep the settings easily consumable, and treat the dark mode for the Web IDE UI as an extension of the dark syntax highlighting theme. From version 13.0 on, you can enable it by selecting the dark syntax highlighting theme, and the rest of the Web IDE will automatically follow. This also gives us the opportunity to later extend other themes and align the rest of the Web IDE UI to their colors.\n\n## The design process\n\n### Light and dark UI vs. themes\n\nInitially, we defined a few concepts to help shape our approach. We refer to light and dark UI in terms of the qualities they have, like brightness, depth, structure, and hierarchy. In GitLab, themes are preferential styles that reside on the UI, and use color to change only the appearance of a few elements.\n\n![UI versus themes](https://about.gitlab.com/images/blogimages/ide-dark-ui-vs-themes.png){: .center}\nThe difference between the UI and themes in GitLab\n{: .note.text-center}\n\n### Working in Figma\n\n#### Figma community\n\nAs soon as we wanted to start experimenting with the UI, we noticed first hand that \"Everyone can contribute\" is not only GitLab's core mission, but also an idea that is very much alive in the Figma community. 
The amazing designers at Microsoft have open-sourced a [design toolkit for Visual Studio Code](https://www.figma.com/community/file/786632241522687494) that allowed us to easily grab the relevant pieces, plug them into our own design file, and manipulate them.\n\n#### Asynchronous feedback\n\nAnother aspect that's deeply embedded in GitLab's ways of working and the way we build our products is asynchronous collaboration. We are the largest all-remote company in the world, and the two designers working on this feature are located in time zones seven hours apart.\n\nUsing Figma to collaborate and give each other feedback on our ideas enabled us to ship this feature with only having to schedule a single meeting, and the rest of the discussions handled via Figma comments. As these discussions were between designers and purely around visual aspects, we kept the discussion inside of Figma instead of using our own [Design Management](https://docs.gitlab.com/ee/user/project/issues/design_management.html) features, which came into play later during the discussions with the engineer working on this feature. It also allowed us to easily involve a lot of other team members, and get comments from other designers all over the globe.\n\n![A comment thread in Figma](https://about.gitlab.com/images/blogimages/ide-dark-async-thread.png){: .center}\nAsync design feedback in Figma\n{: .note.text-center}\n\n### Design challenges\n\nThe overarching design challenge was, and continues to be, understanding how the appearance of elements change as they appear in light vs. dark UI. Generally, structural, container-like UI elements decrease brightness, but content works the opposite and is sometimes nearly inverted. The fundamentals of light, shadow, and depth don't change, but the way the elements leverage them does. 
Similarly, the principles of content legibility, hierarchy, and contrast don't change, but the content does to uphold those principles.\n\nIn the side-by-side example below, we've compared just a few UI elements to demonstrate how they could change between light and dark UI.\n\n![Comparing light and dark UI in the Web IDE](https://about.gitlab.com/images/blogimages/ide-dark-comparison.png){: .center}\nComparing light and dark UI in the Web IDE\n{: .note.text-center}\n\nWhen we map the changes in this small sample, patterns start to emerge. Elements like backgrounds evenly shift darker together to maintain the same sense of depth, while some text content nearly inverts, and the button almost stays the same.\n\n![Colors mapped between light and dark UI](https://about.gitlab.com/images/blogimages/ide-dark-mapping-fade.png){: .center}\nMapping element color in light and dark UI\n{: .note.text-center}\n\nAt face value, it can seem as though many elements are inverted, but that's an oversimplification that leads to an interface looking not quite right. Here's how we're thinking about a few of the specific design challenges we encountered.\n\n#### Stateful elements\n\nIn a light UI, we darken element states to increase contrast, and typically do the opposite in a dark UI. This wasn't the case for tabs and similar elements that have backgrounds more closely integrated into other sections of the UI. And while the borders on the buttons got lighter, the background didn't because we needed to maintain text contrast.\n\n![Button and tab states in light and dark UI](https://about.gitlab.com/images/blogimages/ide-dark-states.png){: .center}\nComparing element states in light and dark UI\n{: .note.text-center}\n\nThis uncovers nuanced differences in the approach between dark and light UI, and we're still ratifying differences and establishing repeatable patterns. 
Needless to say each element deserves plenty of attention.\n\n#### Visual hierarchy and depth\n\nAs mentioned above, depth in dark mode was generally approached in the same way as in a light UI. Brighter elements are more forward, and darker ones recede. In the case of tabs and the file tree we are using a different approach and making these areas darker to increase contrast, rather than evenly darkening layers. We're learning that depth and contrast can both be effective tools, but they aren't always used the same in dark and light UI.\n\nA quick note on shadows, they shouldn't be replaced with glows — a completely different effect. Shadows are noticeably less effective in dark mode, so we explored more variance in gray backgrounds for neighboring sections.\n\n#### Graphics and illustration\n\nGraphics created for a light UI can seem garish or out of place in a dark UI. Images should be addressed on a case-by-case basis, but illustrations and icons can be addressed as a whole. We're exploring CSS variables and classes for SVG fill and path colors. One example that we had to solve were pipeline status icons. These exist in a couple of places in our product and initially had a white background. As this makes them stand out too much in dark mode, we had to rewrite their SVG code to get them to be transparent instead.\n\n![Icons with and without background fill changes](https://about.gitlab.com/images/blogimages/ide-dark-pipeline-icons.png){: .center}\nEnsuring that graphics, like icons, can be adjusted too\n{: .note.text-center}\n\nWith that in place we could map light and dark palettes. For now we're just ensuring that there aren't backgrounds in SVGs that feel out of place.\n\n#### How to ship in small pieces\n\nOur philosophy is to release changes or features as soon as they can help users. 
This sometimes leads to us shipping features that are not completely polished, which is in line with this [famous quote by Reid Hoffmann](https://twitter.com/reidhoffman/status/847142924240379904?lang=en), the founder and CEO of LinkedIn:\n\n> If you're not embarrassed by the first version of your product, you've launched too late.\n\nThe first version of this feature we released had only the code area styled with the dark syntax highlighting theme. Even though it felt a bit out of place, we received good feedback, which was evidence we were headed in the right direction.\n\n![MVC dark mode with light file tree](https://about.gitlab.com/images/blogimages/ide-dark-first-version.png){: .center}\nMVC dark mode with light file tree\n{: .note.text-center}\n\nFrom that point on, we sliced the remaining UI into smaller pieces. Every time we finished a piece, we released the newest version to all our users and started working on the next area. This highly iterative approach would not be acceptable in a lot of other companies, but at GitLab we believe in minimal viable changes ([MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)).\n\nAnother thing we learned was that a dark mode exposed not only structural UI deficiencies, but also inflexible code. Our initial intention was to leave a couple of seldom visited areas unstyled, but we noticed that keeping CSS styles from bleeding over into these areas would cause more problems and effort than fixing it altogether.\n\n#### Effective prototyping\n\nAs demonstrated in the previous paragraphs, one of the toughest challenges when designing a dark mode are elements with multiple states. 
This is also one of the aspects designers are still struggling with when prototyping, which led to us tackling this problem in a couple of ways:\n\n- Creating a large prototype with many artboards to represent edge cases and states\n- Relying heavily on a well-defined color system\n- Multiple sync calls with an engineer to fix smaller aspects, e.g., animations on the fly\n\nFor the next iteration of the prototype, we are going to investigate whether we can leverage Figma's components in a way that buttons have the same hover/focus/active states on multiple artboards. We have set up a [first small test](https://www.figma.com/proto/SvimjjirW0pkn69TNBztU9/Button-state-example?node-id=1%3A3&scaling=min-zoom) to prove that it would be possible, but haven't used it on a more complex prototype yet.\n\n![Web IDE Figma prototype](https://about.gitlab.com/images/blogimages/ide-dark-prototype-lg.png){: .center}\nWeb IDE prototype in Figma to demonstrate states\n{: .note.text-center}\n\n## What we learned so far\n\n- Answering questions for dark mode leads to many questions about why we're doing things a certain way in a light UI. It creates a great circular effect that challenges how we think about the entire UI, which leads to solid convictions.\n- Even a dark mode can be worked on in small iterations. Over the course of this process, we have created dark versions for all Web IDE specific UI elements, but also for dropdowns and modals, which are global elements. This not only makes it easier for us to think about the design, but also about how the code should be structured for a global dark mode.\n- We are clearly standing on the shoulders of giants. Designing and developing this dark mode at such a fast pace was only possible because we had many great in-depth resources about dark mode available to us. 
The two that stood out the most are [Apple's Human Interface Guidelines](https://developer.apple.com/design/human-interface-guidelines/ios/visual-design/dark-mode/) and the dark theme section from [Material Design](https://material.io/design/color/dark-theme.html).\n\n![Web IDE dark mode](https://about.gitlab.com/images/blogimages/ide-dark-loop.gif){: .center}\nWeb IDE dark mode\n{: .note.text-center}\n\n### Next steps\n\n- For the Web IDE as a feature, we're in the process of making our code more easily themable, so that other syntax highlighting themes can be extended more flexibly.\n- We're also planning to clean up the prototype we created, and either create a Web IDE UI Kit, or integrate it into our Pajamas design system, so that others can easily access, modify, and contribute to it.\n\nLastly, you can contribute too! We would especially love to see contributions to extend the other syntax highlighting themes to the rest of our Web IDE UI. If you have anything else in regards to the Web IDE you'd like us to consider, [create a new issue](https://gitlab.com/gitlab-org/gitlab/issues/new) and be sure to tag the GitLab UX Department (@gitlab-com/gitlab-ux). If you'd like to be part of our testing efforts at any level, sign up for our [GitLab First Look](/community/gitlab-first-look/) program. 
You can also [contribute](https://gitlab.com/gitlab-org/gitlab-design/-/blob/master/CONTRIBUTING-Figma.md) to the design of GitLab by starting with our [Pajamas UI Kit](https://www.figma.com/community/file/781156790581391771) in Figma.\n",[1137,791,1323,9],"design",{"slug":1325,"featured":6,"template":699},"creating-a-dark-ui-for-gitlabs-web-ide","content:en-us:blog:creating-a-dark-ui-for-gitlabs-web-ide.yml","Creating A Dark Ui For Gitlabs Web Ide","en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide.yml","en-us/blog/creating-a-dark-ui-for-gitlabs-web-ide",{"_path":1331,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1332,"content":1338,"config":1344,"_id":1346,"_type":13,"title":1347,"_source":15,"_file":1348,"_stem":1349,"_extension":18},"/en-us/blog/cross-project-pipeline",{"title":1333,"description":1334,"ogTitle":1333,"ogDescription":1334,"noIndex":6,"ogImage":1335,"ogUrl":1336,"ogSiteName":685,"ogType":686,"canonicalUrls":1336,"schema":1337},"How to trigger multiple pipelines using GitLab CI/CD","Discover how to trigger and visualize pipelines when you set up GitLab CI/CD across multiple projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666903/Blog/Hero%20Images/pipeline.jpg","https://about.gitlab.com/blog/cross-project-pipeline","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to trigger multiple pipelines using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2019-07-24\",\n      }",{"title":1333,"description":1334,"authors":1339,"heroImage":1335,"date":1341,"body":1342,"category":832,"tags":1343},[1340],"Itzik Gan Baruch","2019-07-24","\n[Continuous integration (CI)](/solutions/continuous-integration/) is the practice of [automating code building and testing](/topics/ci-cd/) before it is\nmerged into the master or default branch. 
This allows developers to merge code early and frequently, while\nmitigating the risk of introducing new bugs into the master source code repository.\n\nWhile CI verifies that new code won't break when integrated with other code in the same repo, having\nall tests pass on that repo is only the first step. After running CI on the code, it is important to\ndeploy and run tests in a live environment. Moving from [CI to continuous delivery and deployment (CD)](/solutions/continuous-integration/)\nis [the next step of DevOps maturity](/topics/devops/). Deploying and then testing again allows code in one project\nto be tested together with other components and services which may be managed in other projects.\n\n## Why do I need to verify that my code works with other components?\n\nA good example could be a\nmicroservices architecture. Usually, different [microservices](/topics/microservices/) are managed in\ndifferent [projects](https://docs.gitlab.com/ee/user/project/) – each microservice has its own\nrepository and own pipeline. It's also very common for different teams to be\nresponsible for different microservices and their pipeline configurations. As a developer you will\nwant to confirm that your code changes don't break functionality of the dependent microservices.\nTherefore, you will want to execute tests on those microservices in addition to your project tests.\n\n## The cross-project pipeline\n\nWhen running your [project pipeline](/topics/ci-cd/cicd-pipeline/), you also want to trigger cross-project or multi-project pipelines,\nwhich will eventually deploy and test the latest version of all dependent microservices. To\nachieve this goal you need an easy, flexible and convenient way to trigger other\npipelines as part of your project CI. 
GitLab CI/CD offers an easy way to run a cross-project\npipeline by simply adding a pipeline trigger job in the CI configuration file.\n\n## GitLab CI/CD configuration file\n\nIn GitLab CI/CD, pipelines, and their component jobs and stages, are defined in\nthe [`.gitlab-ci.yml`](https://docs.gitlab.com/ee/ci/yaml/) file for each project. The\nfile is part of the project repository. It is fully versioned and developers can edit it with any\ncommon IDE of their choice. They do not have to ask the system admin or DevOps team to make\nchanges in the pipeline configuration as it is self-service. The `.gitlab-ci.yml` file defines the structure\nand order of the pipelines and determines what to execute\nusing [GitLab Runner](https://docs.gitlab.com/runner/) (the agent that runs the jobs), and what\ndecisions to make when specific conditions are encountered, like when a process succeeds or fails.\n\n## Add a cross-project pipeline triggering job\n\nSince GitLab 11.8, GitLab provides a new CI/CD configuration syntax for triggering cross-project\npipelines found in the [pipeline configuration file](https://docs.gitlab.com/ee/ci/yaml/).\nThe following code illustrates configuring a bridge job to trigger a downstream pipeline:\n\n```\n//job1 is a job in the upstream project\ndeploy:\n\tstage: Deploy\n\tscript: this is my script\n\n//job2 is a bridge job in the upstream project which triggers cross-project pipeline\nAndroid:\n\tstage: Trigger-cross-projects\n            trigger: mobile/android\n```\n\nIn the example above, as soon as the deploy job succeeds in the deploy stage, the Android\nbridge job is going to be started. The initial status of this job will be pending. GitLab will\ncreate a downstream pipeline in the mobile/android project and, as soon as the pipeline gets created,\nthe Android job will succeed. 
In this case mobile/android is a full path to that project.\n\nThe user who created the upstream pipeline needs to have access rights to the downstream\nproject (mobile/android in this case). If a downstream project cannot be found, or a user does not\nhave access rights to create a pipeline there, the Android job will be marked as failed.\n\n## Browse from upstream pipeline graphs to downstream\n\nGitLab CI/CD makes it possible to visualize the pipeline configuration. In the below illustration, the\nbuild, test, and deploy stages are parts of the upstream project. Once the deploy job succeeds, four\ncross-projects will be triggered in parallel and you will be able to browse to them by clicking on\none of the downstream jobs.\n\n![Build, test and deploy stages](https://about.gitlab.com/images/blogimages/Cross-proj-img1.png){: .shadow.medium.center}\n\nIn the below illustration the Service – Finance downstream pipeline is visible. We can now scroll\nleft to the upstream pipeline, scroll right back to the downstream pipeline or select another\ndownstream pipeline.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img2.png){: .shadow.medium.center}\n\n## Specifying a downstream pipeline branch\n\nIt is possible to specify a branch name that a downstream pipeline will use:\n\n```\ntrigger:\n     project: mobile/android\n     branch: stable-11-2\n```\n\nUse a project keyword to specify the full path to a downstream project. Use a branch keyword to\nspecify a branch name. GitLab will use a commit that is currently on the HEAD of the branch\nwhen creating a downstream pipeline.\n\n## Passing variables to a downstream pipeline\n\nSometimes you might want to pass variables to a downstream pipeline. 
You can do that using\nthe variables keyword, just like you would when defining a regular job.\n\n```\nAndroid:\n           variables:\n             ENVIRONMENT: 'This is the variable value for the downstream pipeline'\n           stage: Trigger-cross-projects\n           trigger: mobile/android\n```\nThe ENVIRONMENT variable will be passed to every job defined in a downstream pipeline. It will be\navailable as an environment variable when GitLab Runner picks a job.\n\n## Cross-project pipeline summary\n\nThe `.gitlab-ci.yml` file defines the order of the CI/CD stages, which jobs to execute, and at which\nconditions to run or skip a job's execution. Adding a 'bridge job' with the `trigger` keyword to\nthis file can be used to trigger cross-project pipelines. We can pass parameters to jobs in\ndownstream pipelines, and even define a branch that a downstream pipeline will use.\n\nPipelines can be complex structures with many sequential and parallel jobs, and as we just\nlearned, sometimes they can trigger downstream pipelines. To make it easier to understand the\nflow of a pipeline, including its downstream pipelines, GitLab has pipeline graphs for viewing\npipelines and each pipeline's status.\n\n![Service – Finance pipeline](https://about.gitlab.com/images/blogimages/Cross-proj-img4.png){: .shadow.medium.center}\n\nHey community, what else would you like me to explain in a blog post? 
Let me know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\nCover image by [Tian Kuan](https://unsplash.com/@realaxer) on [Unsplash](https://unsplash.com)\n{: .note}\n",[108,722,834,789,9],{"slug":1345,"featured":6,"template":699},"cross-project-pipeline","content:en-us:blog:cross-project-pipeline.yml","Cross Project Pipeline","en-us/blog/cross-project-pipeline.yml","en-us/blog/cross-project-pipeline",{"_path":1351,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1352,"content":1358,"config":1365,"_id":1367,"_type":13,"title":1368,"_source":15,"_file":1369,"_stem":1370,"_extension":18},"/en-us/blog/customer-interview-charter-communications",{"title":1353,"description":1354,"ogTitle":1353,"ogDescription":1354,"noIndex":6,"ogImage":1355,"ogUrl":1356,"ogSiteName":685,"ogType":686,"canonicalUrls":1356,"schema":1357},"Better Developer & Customer Experiences with One Application","Director of Product Integration Michael Sobota of Charter Communications shares how they're using GitLab to simplify their toolchain, with big results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663655/Blog/Hero%20Images/gitlab-live-sept-2018.png","https://about.gitlab.com/blog/customer-interview-charter-communications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Customer story: Driving better developer and customer experiences with a single application\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-09-26\",\n      }",{"title":1359,"description":1354,"authors":1360,"heroImage":1355,"date":1361,"body":1362,"category":300,"tags":1363},"Customer story: Driving better developer and customer experiences with a single application",[1133],"2018-09-26","\nDuring [#GitLabLive](/blog/gitlab-live-event-recap/), customer Michael Sobota,\nDirector of Product Integration at [Charter 
Communications](https://www.spectrum.com/about.html), joined us to share how adopting\nGitLab as the [single application](/handbook/product/single-application/) for their entire software development lifecycle has brought their\nfeedback loop of two weeks down to a matter of minutes. Charter is an American telecom\ncompany providing services to over 26 million customers in 41 states, and is the second-largest\ncable operator in the US. They have 94,000 employees worldwide.\n\nYou can watch the interview with Michael and check out our key takeaways from it below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/HnTPi7y5MVo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Key takeaways\n\n### A single place for all development, operations, and feedback is critical to a great developer experience\n\n Michael: \"It's my job to make sure developers who are providing a digital experience to our\n subscribers have a great developer experience: Helping them realize that vision of quick iterations,\n giving them feedback, shifting left concerns like security and testing, deployments, and getting\n that feedback early in our value stream where it’s cheaper to course correct.\"\n\n\"GitLab has been a cornerstone of our [DevOps platform](/solutions/devops-platform/): using it for source control management,\nfor continuous integration, continuous deployment, a Docker registry, artifacts. We want to give\ndevelopers a single place to get feedback, self-service, and do it in a responsible manner that\nallows us to provide great value to our subscribers.\"\n\n### Quick feedback is also essential to staying competitive\n\nMichael: \"Consumers and subscribers are looking for different, more digital ways to interact\nwith companies and to consume content. 
Shifting left allows us to be competitive in creating\nthese new, digital ways for consumers to interact with us, whether it’s paying their bill or understanding\nhow their account is set up, ordering a new service, consuming live streaming video, or video on demand.\nCustomers want that quick feedback and to do that we need to shift things left.\"\n\n### Having everything in one place can drastically reduce your feedback loop\n\nMichael: \"To be able to understand, \"Did my code merge in? Did it build the capacity tests? Did it pass\nthe security standards?\" – these things, in a single place, within the merge request, within that\nUI, have helped us cut down our feedback loop that was typically around our sprint cycle of\naround two weeks, down to minutes.\"\n\n\"Gone are the days of managing different build machines. It’s all in the power of the developers,\nand now from the first line of code on every single branch, we can deploy a mutually exclusive\nenvironment and get feedback in minutes down from that two-week cycle. 
Now, almost every\nsingle branch of code can have a deployment, and you can have feedback as a developer, as a\nproduct owner, or as a designer, right away.\"\n",[722,1364,9],"user stories",{"slug":1366,"featured":6,"template":699},"customer-interview-charter-communications","content:en-us:blog:customer-interview-charter-communications.yml","Customer Interview Charter Communications","en-us/blog/customer-interview-charter-communications.yml","en-us/blog/customer-interview-charter-communications",{"_path":1372,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1373,"content":1379,"config":1386,"_id":1388,"_type":13,"title":1389,"_source":15,"_file":1390,"_stem":1391,"_extension":18},"/en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"title":1374,"description":1375,"ogTitle":1374,"ogDescription":1375,"noIndex":6,"ogImage":1376,"ogUrl":1377,"ogSiteName":685,"ogType":686,"canonicalUrls":1377,"schema":1378},"Developing GitLab Duo: How we are dogfooding our AI features","As part of our blog series, we share real-world examples of how we integrate AI throughout our software development lifecycle and how we use metrics to gauge their success.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098360/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098360821.png","https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: How we are dogfooding our AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-20\",\n      }",{"title":1374,"description":1375,"authors":1380,"heroImage":1376,"date":1382,"body":1383,"category":764,"tags":1384},[1381],"David O'Regan","2024-05-20","***Generative AI marks a monumental shift in the software 
development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.***\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, has transformed our internal engineering workflows, driving efficiency gains across our development process. As strong proponents of dogfooding and transparency, we wanted to showcase how our teams leverage AI, including standouts like GitLab Duo Code Suggestions and GitLab Duo Chat, daily to streamline development processes, reduce manual effort, and enhance productivity. You'll learn about the benefits we've experienced for highly technical teams like engineering to less technical teams such as technical writing and product management.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Real-world use cases\n\nOur teams have integrated [GitLab Duo's many features](https://about.gitlab.com/gitlab-duo/#features) into their daily routines. Here are some examples of how GitLab Duo is helping them carry out everyday activities.\n\n### Summarization and documentation\n- **Streamline the code review process:** Staff Backend Developer [Gosia Ksionek](https://about.gitlab.com/company/team/#mksionek) showcases the practical benefits of AI in her workflow by using GitLab Duo to streamline the code review process. She effectively utilizes GitLab Duo to [summarize merge requests](https://youtu.be/3SIhe8dgFEc), making it easier and faster to review code changes. 
In addition to summarizing merge requests, Gosia also leverages GitLab Duo to [answer coding questions](https://www.youtube.com/watch?v=6n0I53XsjTc) and [explain complex code snippets](https://www.youtube.com/watch?v=3m2YRxa1SCY). This enhances her productivity and helps her better understand and manage intricate codebases. Through these demonstrations, Gosia highlights how GitLab Duo can significantly improve efficiency and clarity in the development process, making it an invaluable tool for developers.\n\n\u003Ccenter>\n\nWatch Gosia use GitLab Duo Merge Request Summary:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3SIhe8dgFEc?si=Q8JG3Ix3K_THhbpv\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to answer coding questions: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6n0I53XsjTc?si=LA9VBHrgXpfJImSL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWatch Gosia use GitLab Duo to explain complex code snippets:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/3m2YRxa1SCY?si=oms3szKwZoz-4yeq\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n- **Condense comment threads:** [Bartek Marnane](https://about.gitlab.com/company/team/#bmarnane), Vice President of Expansion Software Development, uses GitLab Duo to condense lengthy comment threads into concise summaries, ensuring all relevant details are captured when updating issue descriptions.\n\n- **Create new documentation:** [Taylor McCaslin](https://about.gitlab.com/company/team/#tmccaslin), Group Manager, Product - Data Science Section, leveraged GitLab Duo to [create new documentation for 
GitLab Duo itself](https://docs.gitlab.com/ee/user/ai_features.html), exemplifying a meta use case that enhances clarity and consistency and greatly reduces the time to document new features.\n\n- **Craft release notes:** [Amanda Rueda](https://about.gitlab.com/company/team/#amandarueda), Senior Product Manager for Product Planning, uses GitLab Duo to [craft brief, impactful summaries for release notes](https://gitlab.com/groups/gitlab-org/-/epics/10267), highlighting changes and their value to users. By using well-crafted prompts like below, Amanda supercharges her workflow and ensures that each release note is clear, concise, and user-focused, enhancing the overall communication and user experience:\u003Cbr>\u003Cbr>\n*“Please create a two sentence summary of this change, which can be used for our release notes. The tone should be conversational and should be in second person. The summary should include a description of the problem or change and be tied to the value we are creating for you, the user.”*\n\u003Cbr>\u003Cbr>\n    - Here are some examples of release notes co-created with GitLab Duo:\n      - [Expanded options for sorting your Roadmap](https://gitlab.com/gitlab-org/gitlab/-/issues/460492)\n      - [Issue Board Clarity now with Milestone & Iteration](https://gitlab.com/gitlab-org/gitlab/-/issues/25758)\n      - [Design Management Features Extended to Product Teams](https://gitlab.com/gitlab-org/gitlab/-/issues/438829)\n\n- **Optimize docs site navigation:** [Suzanne Selhorn](https://about.gitlab.com/company/team/#sselhorn), Staff Technical Writer, tapped GitLab Duo to [optimize the left navigation of documentation](https://docs.gitlab.com/ee/user/get_started/get_started_projects.html) by providing a workflow-based order of pages. Suzanne provided a list of features to GitLab Duo, which generated the optimal order, updating the left navigation to match. 
GitLab Duo also drafted the [Getting Started](https://docs.gitlab.com/ee/user/get_started/get_started_planning_work.html) documentation much faster than were she to use traditional, manual approaches.\n\n### Goal setting and team alignment\n- **Draft and refine OKRs:** [François Rosé](https://about.gitlab.com/company/team/#francoisrose), Engineering manager, Create:Code Review Backend, finds [GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) invaluable for drafting and refining OKRs. By articulating objectives more clearly and effectively, François enhances goal setting and team alignment. Using Chat, François ensures that each OKR is precise, actionable, and aligned with the team's goals, thereby improving overall team performance and cohesion. Here is an example prompt he uses:\u003Cbr>\u003Cbr>\n\n    *\"Here is an OKR I am thinking of creating:*\n\n    *Objective: Retrospect on retrospectives, to foster a thriving team*\n\n    *KR: Measure retrospective satisfaction from 100% of team members*\n\n    *KR: Identify 3 improvements to the async retrospectives*\n\n    *KR: Implement 1 improvement*\n\n    *Please provide direct feedback on how to improve the formulation of this objective and these key results.\"*\n\u003Cbr>\u003Cbr>\n\n- **Streamlined hiring and recruitment processes:** Chat helped [Denys Mishunov](https://about.gitlab.com/company/team/#dmishunov), Staff Frontend Engineer, formulate a clear and concise text for updating the email template for technical interview candidates. The team collaborated on refining the communication to ensure candidates receive all necessary information using a merge request. 
This example showcased the practical application of AI tools in enhancing communication processes within the hiring workflow.\n\n### Incident response and configuration\n- **Summarize production incidents:** [Steve Xuereb](https://about.gitlab.com/company/team/#sxuereb), Staff Site Reliability Engineer, employs GitLab Duo to summarize production incidents and create detailed incident reviews, streamlining the documentation process.\n\n- **Create boilerplate `.gitlab-ci.yml` files:**  Steve also uses Chat to create boilerplate `.gitlab-ci.yml` files, which significantly sped up his workflow. [Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) serves as a valuable partner for suggesting ideas. Additionally, [Code Explanation](https://docs.gitlab.com/ee/user/ai_features.html#code-explanation) provides detailed answers that are helpful during incidents, enhancing his productivity and understanding of the codebase.\n\n### Code generation and testing\n- **Full-stack development:** [Peter Hegman](https://about.gitlab.com/company/team/#peterhegman), Senior Frontend Engineer, has been using [Code Suggestions for his JavaScript and Ruby development](https://gitlab.com/gitlab-org/gitlab/-/issues/435783#note_1731321963). This highlights that Code Suggestions has become a powerful tool for developers moving across a full technical stack. \n\n- **Generate Python scripts:** Denys conducted [an experiment using GitLab Duo for a non-GitLab task](https://gitlab.com/gitlab-org/ai-powered/ai-framework/ai-experimentation). 
This example highlights the flexibility and utility of our AI tools beyond typical software development tasks.\n\n\u003Ccenter>\nWatch how Denys uses GitLab Duo to generate Python scripts to fetch content data and store it locally:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/30ZTtk4K5yU?si=p5ZcFLg6dTZL5gFE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n### Research and support\n- **Generate test source code:**  [Michael Friedrich](https://about.gitlab.com/company/team/#dnsmichi), Senior Developer Advocate, uses GitLab Duo to generate test source code for CI/CD components. This approach has been shared in various talks and presentations, such as the recent Open Source @ Siemens event ([public slides](https://go.gitlab.com/duA2Fc)). Using GitLab Duo in this manner helps ensure that the code is consistent, well-documented, and aligned with our best practices. Check out his [Rust example](https://gitlab.com/components/rust#contributing).\n\n![Rust example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098367547.png)\n\n- **Streamline research tasks:** Our team members consistently turn to Chat when they have questions about GitLab features, streamlining their research and support tasks. Michael shared, \"When I have a question about GitLab features, I default to using Chat instead of opening 100 browser tabs. This workflow helps me assist users on our community forum efficiently. 
For instance, I recently [helped a user with SSH deployment](https://forum.gitlab.com/t/how-to-make-ssh-deployment-more-clear-in-gitlab/102051/4?u=dnsmichi) using this method.\" Using Chat not only saves time but also provides quick, accurate information, enhancing the support we offer to our community.\n\n### Feature testing\n- **Test new features:** Our engineers use GitLab Duo to test new features like [Markdown support in Code Suggestions](https://gitlab.com/gitlab-org/gitlab/-/issues/443365). One of our team members noted, \"I need to test Markdown support in Code Suggestions for writing blog posts and GitLab docs in VS Code. I saw it was merged for 17.0.\" By testing these features internally, we ensure they meet our quality standards before release.\n\n### Understanding external codebases\n- **Explain external projects:** GitLab Duo's `/explain` feature is particularly useful for understanding external projects imported into GitLab. This capability was highlighted in a recent livestream he did with open source expert Eddie Jaoude. Michael let us know, \"I use `/explain` on external projects to understand the source code. I pitched this idea for learning about open source projects, dependencies, etc. 
during the livestream.\" This feature is invaluable for developers who need to quickly grasp the functionality and dependencies of unfamiliar codebases, significantly improving their efficiency and understanding.\n\n\u003Ccenter>\nWatch Michael demo `/explain` during a livestream with Eddie Jaoude:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/L2Mx8hOhkEE?si=R7W3v4EDqeJCaPOw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003C/center>\n\n## GitLab Duo's benefits\n\nThe integration of GitLab Duo has brought about numerous positive impacts, significantly enhancing our engineering and product development workflows:\n\n- Many tasks that previously required manual intervention are now automated, freeing up valuable time for our engineers. For example, summarizing long threads and creating boilerplate code are now more efficient, allowing our team to focus on more complex issues.\n- The time taken to document and summarize issues has decreased, allowing for quicker information dissemination and decision-making.\n- With AI-assisted code suggestions and explanations, our teams produce higher quality code with fewer errors and faster debugging processes. The integration of GitLab Duo into incident reviews and coding assistance has led to more efficient and effective code reviews.\n- Administrative tasks, such as drafting OKRs and creating release notes, have been streamlined. \n\nGitLab Duo has helped to not only improve our efficiency but also to enhance the quality and speed of our development processes, illustrating the transformative power of AI in software development.\n\n## What's next?\n\nWe are committed to further integrating AI into our workflows and continuously improving GitLab Duo features based on internal feedback and evolving needs. 
The ongoing collection of use cases and metrics with the [AI Impact analytics dashboard](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) will guide enhancements and ensure that GitLab Duo remains at the forefront of AI-driven development tools.\n\n![Dogfooding Duo - AI analytics dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098367/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098367547.png)\n\n> [Get started using GitLab Duo today with our free trial.](https://about.gitlab.com/gitlab-duo/#free-trial)\n\n## Read more \"Developing GitLab Duo\"\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)",[766,1385,834,495,9],"code review",{"slug":1387,"featured":90,"template":699},"developing-gitlab-duo-how-we-are-dogfooding-our-ai-features","content:en-us:blog:developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","Developing Gitlab Duo How We Are Dogfooding Our Ai 
Features","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features.yml","en-us/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features",{"_path":1393,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1394,"content":1400,"config":1407,"_id":1409,"_type":13,"title":1410,"_source":15,"_file":1411,"_stem":1412,"_extension":18},"/en-us/blog/devops-at-nova-scotia-province",{"title":1395,"description":1396,"ogTitle":1395,"ogDescription":1396,"noIndex":6,"ogImage":1397,"ogUrl":1398,"ogSiteName":685,"ogType":686,"canonicalUrls":1398,"schema":1399},"How we introduced DevOps at the province of Nova Scotia","The Linux Ops team and one of the Development teams at the Government of Nova Scotia introduced DevOps practices to their workflow – find out how they did it and what benefits they're now enjoying.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670214/Blog/Hero%20Images/devops-nova-scotia-cover.jpg","https://about.gitlab.com/blog/devops-at-nova-scotia-province","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we introduced DevOps at the province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-08-14\",\n      }",{"title":1395,"description":1396,"authors":1401,"heroImage":1397,"date":1404,"body":1405,"category":718,"tags":1406},[1402,1403],"Steven Zinck","Paul Badcock","2017-08-14","DevOps is the practice of breaking down silos between Development and\nOperations teams. DevOps promotes a culture and practices where Dev and Ops\nteams have open communication and collaboration. 
This article explains how\nthe Linux Ops team and one of the Development teams at the Government of\nNova Scotia were able to implement DevOps practices and realize its\nbenefits.\n\n\n\u003C!-- more -->\n\n\n## The beginning\n\n\nThe Linux Ops team was asked to host a Ruby application built circa 2006.\nWe’re a Red Hat Enterprise Linux shop, provisioning the newest release of\nRHEL 7 and the Ruby app required gems that are only compatible with RHEL 6\nand older. So, we had two options - provision a new RHEL 6 VM - something we\nhaven’t done in over a year, or take this opportunity to containerize the\napplication and use it as a proof of concept. Although we’ve been using\ncontainers for over two years in our [Puppet\nCI](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f)\nenvironment, and have containerized some of our own management apps, this\nwas our first client application to containerize.\n\n\nYou can also learn more about our DevOps transformation by watching our\nrecent interview:\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n\n## Ops digs into the application\n\n\nSince the Ruby code for the application was already in our GitLab, we had\neasy access to it so we could begin to understand its functionality. How\ndoes authentication work? How does SSL work? Where are assets stored?\nExactly which gems are required? Does the system send email, and if so, how?\n\nAs we started to peek into the application, we found the answers to all of\nthese things and a lot more. We were able to make a couple improvements to\nthe application, for example, we were able to remove hard-coded values and\nswitch to variables. 
In particular, we were able to expose database\nconnection strings which can be leveraged by Docker Swarm.\n\n\n## The Docker image\n\n\nNow that we had a better understanding of how the application works, we\nstarted working on the container to host the application. We started with a\nbase image of RHEL 6.9 and began layering on the dependencies and the\napplication itself. Since the Development team is naturally very\nknowledgeable about their application, we collaborated closely with them on\nthis process.\n\n\n## Automatically building and deploying\n\n\nOnce we had an image we were happy with, it was time to configure [Docker\nSwarm](https://docs.docker.com/engine/swarm/) and configure GitLab CI to\npush the image to our Docker registry.\n\n\nI’ve included the relevant piece of our CI configuration below. As you can\nsee, we’re tagging the Docker image with the last commit # and pushing it to\nour internal registry.\n\n\n```build_image:\n  image: docker:1.12\n  stage: build\n  script:\n    - docker build -t\n    ${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}\n    - docker push  \n    ${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}\n```\n\n\nNow that the image is up on our registry, we can tell Docker Swarm that a\nnew image is available. 
Swarm will automatically pull down the new image and\nreload the application with less than five seconds of downtime.\n\n\n```\n\nDOCKER_HOST=\"${DOCKER_DEV_HOST}\" docker service update --image  \n\n${DOCKER_REGISTRY}/${NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}  \n\n${CI_PROJECT_NAME}_app_1\n\n```\n\n\n## Automating security scanning (DevSecOps!)\n\n\nIn addition to building the image, we also run a battery of security tests\nagainst the application code, the operating system, and application in its\nrunning state.\n\n\n![pipeline](https://about.gitlab.com/images/blogimages/devops-nova-scotia-screengrab.png){:\n.shadow}\u003Cbr>\n\n\nAs you can see from the pipeline, after the image is built, we run a static\ncode analysis using [Brakeman](http://brakemanscanner.org/). Brakeman tests\nthe code for security issues, and since it’s a code analysis tool, the\napplication doesn’t need to be running. After the code scan, we run [Red\nHat’s atomic\nscanner](https://developers.redhat.com/blog/introducing-atomic-scan-container-vulnerability-detection/)\nagainst the image. This tool will notify us of any known security issues in\nthe operating system. Finally, we can deploy the application and then run\n[Arachni](http://www.arachni-scanner.com/) to test the application in its\nrunning state.\n\n\n## Benefits of DevOps\n\n\nWe’ve discovered several benefits from this approach:\n\n\n- The Ops and Dev teams worked closely together, each learning about the\nother's domain expertise. 
As Ops discovered issues with the application, we\nwere able to make code changes that were peer-reviewed by the Dev team using\nthe [Git Flow](https://datasift.github.io/gitflow/IntroducingGitFlow.html)\ndevelopment model.\n\n- The time to delivery for the application has improved drastically, and a\nframework has been established that existing, new and third-party staff can\nall leverage.\n\n- Lower failure rate - if a new vulnerability is introduced into the stack,\nwe’ll know.\n\n- Fixes can be applied on demand by Dev without Ops involvement.\n\n- Recovery of the application is now as simple as two clicks.\n\n- Dev and Ops both understand how the application functions and have a\nblueprint of its architecture in the Docker configuration.\n\n\n## Next steps\n\n\nWe’re actively collaborating with other Development teams across government\nto implement DevOps-style practices. From a technology perspective, we’re\naggressively working towards improving our technology stack so that we can\nimprove business value for our customers.\n\n\nThis post originally appeared on\n[*Medium*](https://medium.com/@szinck/devops-at-the-province-of-nova-scotia-42688759a25d).\n\n\n### About the Guest Authors\n\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his\ncareer working in the Public Service as a Unix and Infrastructure\nadministrator. Over the past few years, he's started to transition away from\ntraditional systems administration and begun to focus on software delivery\nand automation. As part of that transition, his team has implemented GitLab\nat the core of their automation and software delivery stack. His current focus\nis working with software and application teams to assist in streamlining\ntheir deployment and delivery process.\n\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working\nin the IT sector in 1998 with positions in small startups, to large Fortune\n500 companies, to currently on a public-sector team. 
His career was focused\nas a traditional IT Linux administrator until in the mid-2000s he started\nfocusing on adopting development tooling, practices and methodologies for\noperational teams. This work culminated in implementing an early 2010s\nDevOps workplace framework with the help of @stewbawka and subsequently\nworking with like-minded teams since. As a part of adopting developer tools\nhe has previously worked with and managed CVS, SVN installations and various\nvendor products before reading a “Show HN” posting on Hacker News about\nGitLab.\n",[722,9],{"slug":1408,"featured":6,"template":699},"devops-at-nova-scotia-province","content:en-us:blog:devops-at-nova-scotia-province.yml","Devops At Nova Scotia Province","en-us/blog/devops-at-nova-scotia-province.yml","en-us/blog/devops-at-nova-scotia-province",{"_path":1414,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1415,"content":1421,"config":1428,"_id":1430,"_type":13,"title":1431,"_source":15,"_file":1432,"_stem":1433,"_extension":18},"/en-us/blog/devsecops-security-automation",{"title":1416,"description":1417,"ogTitle":1416,"ogDescription":1417,"noIndex":6,"ogImage":1418,"ogUrl":1419,"ogSiteName":685,"ogType":686,"canonicalUrls":1419,"schema":1420},"Automated security testing for DevSecOps","We share four fool-proof ways to bring your security automation to the next level and five reasons why it's critical.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662504/Blog/Hero%20Images/devsecops-automated-security.jpg","https://about.gitlab.com/blog/devsecops-security-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automated security testing for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-08\",\n      
}",{"title":1416,"description":1417,"authors":1422,"heroImage":1418,"date":1424,"body":1425,"category":718,"tags":1426},[1423],"Vanessa Wegner","2020-07-08","\n\n_This is the third in our five-part series on getting started with [DevSecOps](/topics/devsecops/). Part one gives you nine ways to [shift security left](/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](/blog/achieve-devsecops-collaboration/)._\n\nNearly 83% of developers in [GitLab’s 2020 DevSecOps survey](/developer-survey/) say they’re releasing code faster today than ever before thanks to [DevOps](/topics/devops/). About 65% also say security is shifting left in their organizations. How far left is that shift? Not that far: Over 60% of developers don’t actually run static [application security](/topics/devsecops/) testing (SAST) scans, and 73% don’t conduct dynamic application security testing (DAST) scans.\n\nThis needs to change.\n\nSecurity is often a bottleneck to faster releases but it is much too risky to minimize or ignore. DevSecOps promises to bring security  forward in the software development lifecycle (SDLC). This can be done a number of ways but automated security testing streamlines adoption and scalability. A respondent to this year’s DevSecOps Survey summarized it nicely:\n\n> Automated testing and continuous integration have made our deployments safer and more optimized. Now everyone in the team has the permission to deploy the code.\n\n## The need for security automation and good security practices\n\nThere is an attempted cyber-attack [every 44 seconds](https://us.norton.com/blog/emerging-threats/cybersecurity-statistics#:~:text=There%20isn't%20concise%20data,people%20being%20hacked%20per%20year.) on average. \n\n_Every. 44. Seconds._ \n\nThis also equates to approximately 2,200 daily attacks resulting in about 800,000 people being hacked each year. 
Unfortunately, no one has the time, patience, or bandwidth to keep their eyes and hands ready to stop or address cyber attacks on the horizon. That’s why security automation tools exist.\n\nAnd consider this: cyber attackers aren’t doing everything by hand – they employ automation too. This means security processes also [need automation to keep up](https://www.checkpoint.com/cyber-hub/cyber-security/security-automation/#:~:text=Security%20automation%20is%20the%20automation,scale%20to%20handle%20growing%20workloads.). \n\nA security automation solution can include real-time monitoring tools that constantly manage security vulnerabilities and take automatic action where needed. It’s like adding a second pair of invisible hands to the team to help prevent and resolve security issues. Increased security measures can save any organization time and money and avoid the loss of sensitive files. \n\n\n## 4 Ways to automate security in software development\n\n[Automation](https://docs.gitlab.com/ee/topics/autodevops/) comes in all shapes and sizes. Scans and policies can be programmed manually or come as set operations out of the box; scans can be triggered automatically at code commit or manually initiated; and these scans can result in automated remediation and reports or they can require human intervention. Here are four ways automated security testing can be integrated into your software development practices:\n\n1. Automate security scans for every code change by [running SAST scans](https://docs.gitlab.com/ee/user/application_security/sast/index.html). For ease of assessment, results should be sorted by the priority level of the vulnerability.\n\n1. Scan results should automatically initiate a work ticket or issue, or may stop a build depending on the policy in place. These results should be presented to the developer – in the workspace or IDE in use to avoid context switching – for instant remediation.\n\n1. 
Policies are automatically applied upon code commit with the option to capture and approve exceptions as needed.\n\n1. Analyze running web applications for known vulnerabilities [using DAST scans](https://docs.gitlab.com/ee/user/application_security/dast/). In GitLab, DAST scans can be automated by [including the CI job in your existing .gitlab-ci.yml file](https://docs.gitlab.com/ee/user/application_security/dast/#configuration), or by [using Auto DAST](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-dast).\n\n\n\n## 5 Benefits of automated security\n\nIn addition to making jobs easier across development, security, and operations, automated security testing will help your team produce a safer and better-quality result.\n\n1. **Reduced human error.** Across all functions, automation reduces human error by taking the manual work out of tedious processes that rely on excessive attention to detail.\n\n1. **Early security intervention.** By placing security earlier in the SDLC, threats and vulnerabilities can be detected and addressed faster – hopefully before there’s even a chance that they’re exposed.\n\n1. **Streamlined vulnerability triage.** Automated scan reports can present the threat level of any vulnerability so that developers and security engineers alike can decide which must be addressed immediately and who is responsible for resolving the problem.\n\n1. **Repeatable security checks.** Any automated task should be repeatable, which means that all code can be reviewed and assessed the same way every time. This creates a trusted and secure environment and code base, and also helps reviewers identify patterns when results are presented in a consistent manner.\n\n1. **Responsibility clarification.** Automation takes uncertainty out of DevSecOps. Shifting security can cause confusion about who is responsible for what. 
But automated scans can present remediation options for the party responsible _at that stage of development_.\n\nBut it is also important to find a productive balance between automated security testing and manual work. For example, trying to automate overly rigorous policies may prove detrimental to business objectives and may not be realistically achieved – it’s important to find a balance between policy compliance and efficiency. It’s also key that automation doesn’t obstruct visibility. Make sure there is still a trail of operations to review if necessary – automated processes should still generate reports of what was done, when, and why the action was triggered. Last, but certainly not least: Automation is **not** meant to replace human beings. It is a tool meant to make their work more efficient and help them produce better results for the team, the business, and the customer.\n\n## Security automation vs. security orchestration\n\nThough they are different concepts, security automation and security orchestration perform similar functions. One serves the other to make security processes more efficient. \n\nSecurity automation focuses on automating individual tasks (possibly with AI technology) to simplify essential processes for security analysts. On the flip side, security orchestration connects tools in use alongside automation and streamlines the whole security procedure. Orchestration drives efficient automation.\n\n## Types of security automation tools\n\nTo keep track of security incidents (and prevent them in the future), teams use security automation tools and different types of security scanning. 
A few common types of security automation tools include:\n\n- Security Information and Event Management (SIEM): SIEMs help to automatically collect data across multiple sources and use it to give contextual background about security incidents.\n- Security Orchestration, Automation, and Response (SOAR): SOAR takes SIEM a step further than just contextual data collection and adds automated response options to the mix. SOAR alerts security analysts to problems and shuts down cyber threats automatically. \n- Extended Detection and Response (XDR): This proactive, automated solution combines SIEM, SOAR, and other security options into one managed source.\n\n## How security automation works with security analysts\n\nA human can’t do all of the necessary security work, nor can a security automation tool. It’s a symbiotic relationship to ensure that an organization feels the least amount of negative impact from a cyber attack possible. \n\nA security analyst, responsible for vulnerability management by identifying and resolving security flaws and conducting [audits](https://about.gitlab.com/blog/what-you-need-to-know-about-devops-audits/), gets a lot of help from automation. An automated security system can make someone aware of a problem and even help to resolve it while removing manual time constraints.\n\n**Read more about DevSecOps:**\n* [Efficient DevSecOps: 9 tips for shifting left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/)\n* [Want better DevSecOps? 
Try cross-functional collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/)\n* [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n* [How application security engineers can use GitLab to secure their projects](https://about.gitlab.com/blog/secure-stage-for-appsec/)\n\nCover image by [Daniele Levis Pelusi](https://unsplash.com/@yogidan2012) on [Unsplash](https://unsplash.com/photos/Pp9qkEV_xPk)\n{: .note}\n\n\n\n",[722,787,9,1427],"zero trust",{"slug":1429,"featured":6,"template":699},"devsecops-security-automation","content:en-us:blog:devsecops-security-automation.yml","Devsecops Security Automation","en-us/blog/devsecops-security-automation.yml","en-us/blog/devsecops-security-automation",{"_path":1435,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1436,"content":1442,"config":1447,"_id":1449,"_type":13,"title":1450,"_source":15,"_file":1451,"_stem":1452,"_extension":18},"/en-us/blog/devsecops-security-standardization",{"title":1437,"description":1438,"ogTitle":1437,"ogDescription":1438,"noIndex":6,"ogImage":1439,"ogUrl":1440,"ogSiteName":685,"ogType":686,"canonicalUrls":1440,"schema":1441},"DevSecOps basics: 5 steps to standardize (and then scale) security","DevSecOps is incomplete without speed and scale. 
Standardize security to make it happen.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663613/Blog/Hero%20Images/devsecops-security-standardization.jpg","https://about.gitlab.com/blog/devsecops-security-standardization","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevSecOps basics: 5 steps to standardize (and then scale) security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-07-20\",\n      }",{"title":1437,"description":1438,"authors":1443,"heroImage":1439,"date":1444,"body":1445,"category":718,"tags":1446},[1423],"2020-07-20","\n_This is the fifth in our five-part series on [DevSecOps](/topics/devsecops/) basics. Part one offers nine tips to truly [shift left](https://about.gitlab.com/blog/efficient-devsecops-nine-tips-shift-left/). Part two outlines the steps needed to create [silo-free collaboration](https://about.gitlab.com/blog/achieve-devsecops-collaboration/). Part three looks at the importance of [automated security testing](https://about.gitlab.com/blog/devsecops-security-automation/). And part four details how to create a [strong security culture](https://about.gitlab.com/blog/security-culture-devsecops/)._\n\nStandardizing security policies comes in a variety of forms: regulatory compliance, access controls, acceptable use policies, security as code, and automation, to name a few. Ultimately, the idea is that your security team knows exactly what policies and methods have been used or applied to each project. \n\nThe goals of standardization are consistency, traceability, and repeatability. By consistently using the same security methods across all work, security knows what has been protected and what hasn’t. This helps them apply additional measures where necessary, and makes them aware of any needed exceptions. 
Ensuring that security methods are repeatable helps to expand adoption and scale security to the entire organization or enterprise. \n\n## Building a standardized security program\n\nA holistic security program should be composed of different levels of policies and compliance. Some policies should be company-wide, such as an [acceptable use policy](https://whatis.techtarget.com/definition/acceptable-use-policy-AUP), some will fulfill regulations like the [GDPR](https://gdpr-info.eu/) or [CCPA](https://oag.ca.gov/privacy/ccpa), and some will be specific to certain organizations within your business. \n\n### Standardizing security in DevOps\n\n[DevSecOps can be executed sustainably](/solutions/security-compliance/) at scale with standardized security practices. Here are five ways to standardize security across all of your development projects.\n\n#### Educate\n\nProvide security training and education to every employee. Companywide security initiatives [help to build a security culture](https://about.gitlab.com/blog/security-culture-devsecops/) and empower employees to take responsibility for security in their own work. Standardized training also spreads awareness of mandatory policies and alerts employees to the actions taken to both secure day-to-day operations and protect their customers. \n\n#### Coordinate\n\nCoordinate a predefined set of security requirements among dev, sec, and ops that can be coded into your pipeline and applied to every project. These can ensure regulatory compliance, foster secure coding practices, trigger red flags or notifications, and educate employees on security best practices.\n\n#### Authenticate\n\nAccess controls are a critical component of any security framework, and should be continually monitored and evaluated. By keeping close tabs on who needs access to what, you’re able to build a solid wall around your most critical processes and assets. 
This eliminates unnecessary access to sensitive data, and helps streamline tracing, recovery, and remediation efforts when something goes wrong. Access control policies also help defend your business by enhancing authentication requirements.\n\n#### Integrate\n\nEmbed scan and test tools within your development pipeline. Static and dynamic application security testing (SAST and DAST, respectively) can be set to run at every code commit and in the review app. Other tools and tests include IAST, fuzzing, licence compliance, container scanning, and dependency scanning (among others). Embedding tools directly into the pipeline allows you to know exactly what the code has been evaluated for, and also what the code has not been checked for. \n\n#### Automate\n\nIn DevSecOps, automation is the true key to standardized security practices as it allows for fast, secure development at scale. There are a number of ways to automate security within and around your development pipeline – the trick is to find an appropriate balance between automation and manual intervention. Automated policies should serve as guardrails that guide development smoothly from one security check to the next, but they should also allow for exceptions when needed. These guardrails should automatically generate reports from code scans and consolidate them into a [security dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) for review. This helps to minimize human error and any false positives or negatives, allows for consistent vulnerability reporting, and can be used to measure a team’s performance against secure coding expectations. Automation also helps to prevent overly complex security programs by reducing ad-hoc policies and redundant work.\n\n## The best security programs will change\n\nSecurity will never be a set-it-and-forget-it practice. 
The threat landscape is constantly changing, external regulations will continue to evolve, and internal business requirements will always keep you on your toes. While setting standards for security will help your team manage the workload, these standards need to be constantly re-evaluated and updated. Outdated security practices will undermine even the most solid programs, so it’s important to use part of the time saved from standardizing and automating to plan for the future. \n\n_How efficient are your DevSecOps practices? [Take our DevSecOps Maturity Assessment to find out.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)_\n\n**Learn more about DevSecOps:**\n* [Case Study: How Jasper Solutions offers “DevSecOps in a box” with GitLab”](https://about.gitlab.com/customers/jasper-solutions/)\n* [How to capitalize on GitLab Security tools with external CI](https://docs.gitlab.com/ee/integration/jenkins.html)\n* [How to overcome toolchain security challenges with GitLab](https://about.gitlab.com/blog/toolchain-security-with-gitlab/)\n\nCover image by [Andrew Ridley](https://unsplash.com/@aridley88) on [Unsplash](https://unsplash.com/photos/jR4Zf-riEjI)\n{: .note}\n",[722,787,9,1427],{"slug":1448,"featured":6,"template":699},"devsecops-security-standardization","content:en-us:blog:devsecops-security-standardization.yml","Devsecops Security Standardization","en-us/blog/devsecops-security-standardization.yml","en-us/blog/devsecops-security-standardization",{"_path":1454,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1455,"content":1461,"config":1467,"_id":1469,"_type":13,"title":1470,"_source":15,"_file":1471,"_stem":1472,"_extension":18},"/en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"title":1456,"description":1457,"ogTitle":1456,"ogDescription":1457,"noIndex":6,"ogImage":1458,"ogUrl":1459,"ogSiteName":685,"ogType":686,"canonicalUrls":1459,"schema":1460},"Don’t confuse these 12 shortcuts with iteration","Iteration is a 
GitLab value. Sid Sijbrandij, GitLab’s co-founder and CEO, discusses 12 shortcuts that are not iterations to help refine what is considered a good iteration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663383/Blog/Hero%20Images/tanuki-bg-full.png","https://about.gitlab.com/blog/dont-confuse-these-twelve-shortcuts-with-iteration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Don’t confuse these 12 shortcuts with iteration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-12-01\",\n      }",{"title":1456,"description":1457,"authors":1462,"heroImage":1458,"date":1464,"body":1465,"category":693,"tags":1466},[1463],"Sid Sijbrandij","2021-12-01","\n\n[Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab’s [top 3 values](https://handbook.gitlab.com/handbook/values/#hierarchy) because it enables everyone to be efficient in how they deliver value to customers and the wider community. Iteration helps us [build our product and improve our work lives](/blog/power-of-iteration/). \n\nIn essence, iteration is reducing the scope of your next piece of work to the smallest valuable thing possible so that you can deliver it fast. By reducing the scope and delivering to the user, rather than holding it back while finishing a larger scope of work, you benefit in the following ways: \n\n- Reduce coordination efforts\n- Reduce cancellations\n- Easier and faster reviews\n- Get feedback faster\n\nIterating helps you ensure that your next step is in the right direction. \n\nAs we’ve practiced iteration at GitLab, I’ve found that when it’s not clearly understood, well-intentioned mistakes can happen. Because iteration is fundamental to everything that we do, it’s critical to regularly reinforce and refine what we define as an iteration. 
\n\nTo help clarify what is iteration, it helps to see examples of what iteration is not. Here are 12 shortcuts that I’ve seen be mistaken as iteration. \n\n## 1. Reducing quality\n\nSome people will take shortcuts, which leads to lower quality in the final product or deliverable. You can’t reduce quality to minimize the scope of an iteration. Your iteration needs to meet the same quality standards you would expect for any of your work. \n\nFor example, in the case of a user interface, every button needs to work and be properly styled and aligned. Nothing should feel out of place or unfinished. You can reduce the amount of functionality, but the functionality that you deliver needs to look and function as expected. \n\n## 2. Avoiding or reducing documentation\n\nWhen defining the scope of an iteration, make sure you include the right information so that the user can properly understand what happened and can derive value from your work. In the case of a new feature, without proper documentation, the recipient may not understand how to best use the feature, which defeats the purpose of delivering it quickly. \n\nIteration will make documentation faster given the reduced scope, so don’t avoid or delay the documentation. \n\n## 3. Compromising on security\n\nYou can’t compromise on security in the spirit of moving faster. An iteration must meet the same security standards and follow all the necessary security practices to ensure that your product and work doesn’t introduce any new vulnerabilities. \n\nAs an example, when building new features in GitLab, no matter how small an iteration is defined, we should always prioritize the protection of customers’ data. \n\n## 4. Delivering something that's not on the recommended path or on by default\n\nTo call an iteration complete, it needs to be on the recommended path or on by default. 
Otherwise, most users won’t see or benefit from the work.\n\nAt GitLab, in the past, we have made the mistake of considering an iteration complete before making a new feature the default or recommended path for all our users, which then results in fewer users for that functionality. To prevent having functionality in the product that users won’t find, we now require that the feature is on the recommended path and on by default before we call the iteration complete. \n\nFor big changes, such as when a feature may have a big impact on user experience or stability, we use feature flags when initially shipping a feature. This is a good strategy to start delivering something gradually into the product, but we don’t consider the iteration done until that feature flag has been removed and the feature is on by default. \n\n## 5. Shipping something of no value\n\nWhen considering the smallest scope possible for an iteration, the ultimate test needs to be whether it delivers something of value to the end user. Don’t confuse iteration with making progress on an initiative or checking off items on your to-do list. \n\nFor example, when building a new feature you may need to do a fair amount of set up. You may ship to production code that adds a configuration or capability that you’ll need to build the feature, but it’s completely transparent to the user. While that can be considered progress on your project timeline, it is not an iteration. The iteration completes when the user can start to derive some value from your efforts. \n\n## 6. An excuse to focus on unimportant items\n\nIteration will help you move faster and deliver more things, but you still have to prioritize and focus on what’s most important. When picking what to work on, you shouldn’t do first what’s smallest in scope. Instead, pick what will give you the highest value for the effort you’ll put in.\n\n## 7. Changing or lowering goalposts\n\nChanging a goal or lowering a goal is not iteration. 
Iteration is reducing the scope and keeping it small, but the reduced scope still needs to meet your goals. As you practice iteration, you may set goals for smaller time periods, which is a good practice I recommend. But changing the goal post is not a part of iteration. \n\n## 8. Revisions you don't ship or publish\n\nIt’s a common mistake to confuse revisions with iterations. To clearly understand the difference, see whether you’ve shipped or delivered something of value to the end user. If you haven’t, it is a revision, not an iteration. \n\nFor example, if when writing a blog post you get a draft reviewed and rewritten several times before publishing, those are considered revisions. Your first iteration is completed once you’ve published the first version of the blog post. \n\n## 9. An excuse to impose unrealistically tight timelines\n\nIf you set a timeline, it has to be realistic. I’ve seen instances in which people confuse iteration with just shrinking the timeline to something unrealistic. That is not iterating. Iteration is minimizing scope, but it requires a disciplined review of the scope to ensure that you’re allocating the right amount of time to complete it. \n\n## 10. An excuse to avoid planning\n\nSometimes teams confuse iteration with moving quickly on something without planning. This is not iterating. By reducing the scope, there will be less planning involved compared to the initial larger scope. But, no matter how small you make the scope of work, you need to plan, and that planning can be quite involved. You need to set an appropriate timeline to deliver the work and plan appropriately for reviews and dependencies. \n\nNot planning appropriately for an iteration negatively impacts efficiency, team morale, and can impact people’s lives outside of work. This needs to be avoided. \n\n## 11. Imposing long hours\n\nDon’t confuse iteration with imposing long working hours for a team. 
The goal of iterating is to scope work in a way that helps you deliver more tangible value within the same amount of time. Increasing the number of hours that the team works is not iteration. \n\n## 12. Expecting others to fix your work\n\nWhen you iterate, you need to take ownership and make sure that the end result is of value and meets all expectations of a finished product. You should not call an iteration complete if the work still requires fixing in order to be of value or meet quality expectations. \n\nAs GitLab grows as a company, reinforcing our Iteration value and staying clear about what is an iteration is fundamental to us [staying a startup](/company/still-a-startup/). I hope these 12 examples that are not iteration are helpful and empower everyone to help identify and correct situations when iteration is used incorrectly. Using iteration correctly will help us continue to move fast and deliver more value to our customers. It will also help keep the day-to-day momentum as we deliver valuable results. 
\n\nWatch this GitLab Unfiltered video where I discuss these 12 shortcuts that are not iteration.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/BW6TWwNZzIQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n",[722,9,743],{"slug":1468,"featured":6,"template":699},"dont-confuse-these-twelve-shortcuts-with-iteration","content:en-us:blog:dont-confuse-these-twelve-shortcuts-with-iteration.yml","Dont Confuse These Twelve Shortcuts With Iteration","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration.yml","en-us/blog/dont-confuse-these-twelve-shortcuts-with-iteration",{"_path":1474,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1475,"content":1481,"config":1486,"_id":1488,"_type":13,"title":1489,"_source":15,"_file":1490,"_stem":1491,"_extension":18},"/en-us/blog/elite-team-strategies-to-secure-software-supply-chains",{"title":1476,"description":1477,"ogTitle":1476,"ogDescription":1477,"noIndex":6,"ogImage":1478,"ogUrl":1479,"ogSiteName":685,"ogType":686,"canonicalUrls":1479,"schema":1480},"How elite DevOps teams secure the software supply chain","The time is now to integrate security into your DevOps processes - your business will be better for it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667466/Blog/Hero%20Images/GitLab-Sec.png","https://about.gitlab.com/blog/elite-team-strategies-to-secure-software-supply-chains","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How elite DevOps teams secure the software supply chain\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-01-06\",\n      }",{"title":1476,"description":1477,"authors":1482,"heroImage":1478,"date":1483,"body":1484,"category":787,"tags":1485},[1070],"2022-01-06","\nIn 2022, the question is not if 
DevOps teams will integrate security into the software supply chain, but when and how quickly. The high-profile supply chain attacks of 2020 and 2021 have forced organizations to do more to protect themselves and their customers. Every DevOps team should strive to be an elite team in this area, aka [DevSecOps](/topics/devsecops/), as doing less will leave your software supply chains vulnerable.\n\nWhile many organizations might have been hesitant to blend security and DevOps over fears of how it would impact deployment schedules and performance, Google Cloud’s DevOps Research and Assessment (DORA) team concluded in its [“Accelerate State of DevOps 2021 Report”](https://services.google.com/fh/files/misc/state-of-devops-2021.pdf) that “development teams that embrace security see significant value driven to the business.”\n\nTeams that integrate security practices throughout their development process are 1.6 times more likely to meet or exceed their organizational goals, according to the report, which is co-sponsored by GitLab. Meantime, elite performers that met or exceeded their reliability targets were twice as likely to have security integrated into their development process.\n\nTo get to this elite level, though, security has to be baked into DevOps processes at the earliest stages. DevOps and security teams need to collaborate to ensure that they understand one another’s goals and speak the same technical language so they can develop DevSecOps best practices that effectively and efficiently satisfy those goals.\n\nOur newly released [“Guide to Software Supply Chain Security”](https://learn.gitlab.com/devsecops-aware/software-supply-chain-security-ebook) explains the urgency of protecting the supply chain now – no one wants a repeat of the SolarWinds or Colonial Pipeline attacks – and how the U.S. 
government will soon require many organizations to do so.\n\nWe help DevOps teams frame what it means to be elite, including moving beyond basic protections (using strong passwords, applying software patches in a timely manner, and implementing multi-factor authentication) to deploying these best practices:\n\n* Apply common controls for security and compliance\n* Automate common controls and CI/CD\n* Apply zero-trust principles\n* Inventory all tools and access, including infrastructure as code\n* Consider unconventional scale to find unconventional vulnerabilities\n* Secure containers and orchestrators\n\nThe guide also explains in detail the types of security scans that bolster supply chain security, including container scanning, dependency scanning, fuzz testing, dynamic application security testing (DAST) and static application security testing (SAST), license compliance, and secret detection.\n\nFor those unsure where they fall on the spectrum of supply chain security readiness, we’ve developed a two-minute quiz that examines how you handle the security of APIs, dependencies, and other critical areas.  
Use your ranking to plot your transformation to an elite team.\n\nAs the DORA report showed, there is room for improvement across the industry as fewer than two-thirds of DevOps teams are doing these simple security practices:\n\n* 63% invite InfoSec teams early and often\n* 60% perform security reviews\n* 58% test for security\n* 54% integrate security reviews into every phase\n* 49% build pre-approved code\n\nThere is little doubt that 2022 will have more high-profile supply chain attacks, but our guide can help you develop DevOps security processes that will protect your organization and your customers.\n\n## Read more on elite teams and supply chain security here:\n- [How to make your DevOps team elite performers](/blog/how-to-make-your-devops-team-elite-performers/)\n- [How a DevOps Platform helps protect against supply chain attacks](https://about.gitlab.com/blog/devops-platform-supply-chain-attacks/)\n- [DevSecOps FAQ: Get up to speed](https://about.gitlab.com/blog/devsecops-faq-get-up-to-speed-on-this-hot-devops-area/)\n",[722,787,9],{"slug":1487,"featured":6,"template":699},"elite-team-strategies-to-secure-software-supply-chains","content:en-us:blog:elite-team-strategies-to-secure-software-supply-chains.yml","Elite Team Strategies To Secure Software Supply Chains","en-us/blog/elite-team-strategies-to-secure-software-supply-chains.yml","en-us/blog/elite-team-strategies-to-secure-software-supply-chains",{"_path":1493,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1494,"content":1499,"config":1508,"_id":1510,"_type":13,"title":1511,"_source":15,"_file":1512,"_stem":1513,"_extension":18},"/en-us/blog/embedded-views-the-future-of-work-tracking-in-gitlab",{"config":1495,"ogImage":1496,"title":1497,"description":1498},{"noIndex":6},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099072/Blog/Hero%20Images/Blog/Hero%20Images/agile_agile.png_1750099072322.png","Embedded views: The future of work tracking in GitLab","Learn how embedded views, 
powered by GitLab Query Language, help GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows.",{"title":1497,"description":1498,"authors":1500,"date":1504,"body":1505,"category":1506,"tags":1507,"heroImage":1496},[1501,1502,1503],"Matthew Macfarlane","Himanshu Kapoor","Alex Fracazo","2025-08-21","Ever find yourself switching between tabs in GitLab just to keep track of\nwhat’s happening in your project? Maybe you’re checking on an issue, then\njumping to a merge request, then over to an epic to see how everything\nconnects. Before you know it, you’ve got a browser full of tabs and you’ve\nlost your train of thought.\n\nIf that sounds familiar, you’re definitely not alone. So many teams waste time and energy flipping through various items in their project management software, just trying to get a handle on their work.\n\nThat's why we created [embedded views](https://docs.gitlab.com/user/glql/#embedded-views), powered by [GitLab Query Language (GLQL)](https://docs.gitlab.com/user/glql/). With embedded views, [available in 18.3](https://about.gitlab.com/releases/2025/08/21/gitlab-18-3-released/), you get live, relevant information right where you’re already working in GitLab. No more endless context switching. No more outdated reports. Just the info you need, right when you need it.\n\n## Why embedded views matter\n\nEmbedded views are more than just a new feature, they're a fundamental shift in how teams understand and track their work within GitLab. With embedded views, teams can maintain context while accessing real-time information, creating shared understanding, and improving collaboration without ever leaving their current workflow. 
It’s about making work tracking feel natural and effortless, so you can focus on what matters.\n\n## How it works: Real-time data right where you need it the most\n\nEmbedded views let you insert live GLQL queries in Markdown code blocks throughout wiki pages, epics, issues, and merge requests. Here's what makes them so useful:\n\n### Always up to date\n\nGLQL queries are dynamic, pulling fresh data each time the page loads, so your embedded views always reflect the current state of your work, not the state when you embedded the view. When changes happen to issues, merge requests, or milestones, a page refresh will show those updates in your embedded view.\n\n### Contextual awareness\n\nUse functions like `currentUser()` and `today()` to make queries context-specific. Your embedded views automatically adapt to show relevant information for whoever is viewing them, creating personalized experiences without manual configuration.\n\n### Powerful filtering\n\nFilter by fields like assignee, author, label, milestone, health status, creation date, and more. Use logical expressions to get exactly the data you want. We support more than 30 fields as of 18.3.\n\n### Customizable display\n\nYou can display your data as a table, a list, or a numbered list. Choose which fields to show, set a limit on the number of items, and specify the sort order to keep your view focused and actionable.\n\n### Availability\n\nYou can use embedded views in group and project wikis, epic and issue descriptions, merge requests, and comments. GLQL is available across all GitLab tiers: Free, Premium, and Ultimate, on GitLab.com, GitLab Self-Managed, and GitLab Dedicated. Certain functionality, such as displaying epics, status, custom fields, iterations, and weights, is available in the Premium and Ultimate tiers. 
Displaying health status is available only in Ultimate.\n\n## See embedded views in action\n\nThe syntax of an embedded view's source is a superset of YAML that consists of:\n\n* The `query` parameter: Expressions joined together with a logical operator, such as `and`.\n* Parameters related to the presentation layer, like `display`, `limit`, or `fields`, `title`, and `description`\n  represented as YAML.\n\nA view is defined in Markdown as a code block, similar to other code blocks like Mermaid.\n\nFor example:\n\n> Display a table of first 5 open issues assigned to the authenticated user in `gitlab-org/gitlab`.\n> Display columns `title`, `state`, `health`, `description`, `epic`, `milestone`, `weight`, and `updated`.\n\n````yaml\n```glql\ndisplay: table\ntitle: GLQL table 🎉\ndescription: This view lists my open issues\nfields: title, state, health, epic, milestone, weight, updated\nlimit: 5\nquery: project = \"gitlab-org/gitlab\" AND assignee = currentUser() AND state = opened\n```\n````\n\nThis source should render a table like the one below:\n\n![](https://res.cloudinary.com/about-gitlab-com/image/upload/v1755193172/ibzfopvpztpglnccwrjj.png)\n\nAn easy way to create your first embedded view is to navigate to the **More options** dropdown in the rich text editor toolbar. Once in this toolbar, select **Embedded view**, which populates the following query in a Markdown code block:\n\n````yaml\n```glql\nquery: assignee = currentUser()\nfields: title, createdAt, milestone, assignee\ntitle: Issues assigned to current user\n```\n````\n\nSave your changes to the comment or description where the code block appears, and you're done! You've successfully created your first embedded view!\n\n## How GitLab uses embedded views\n\nWhether tracking merge requests targeting security releases, triaging bugs to improve backlog hygiene, or managing team onboarding and milestone planning, we rely on embedded views for mission-critical processes every day. 
This isn't just a feature we built, it's a tool we depend on to run our business effectively. When you adopt embedded views, you're getting a tested solution that's already helping GitLab teams work more efficiently, make data-driven decisions, and maintain visibility across complex workflows. Simply stated, embedded views can transform how your team accesses and analyzes the work that matters most to your success.\n\nTo learn and see more about how GitLab is using embedded views internally, check out [How GitLab measures Red Team impact: The adoption rate metric](https://about.gitlab.com/blog/how-gitlab-measures-red-team-impact-the-adoption-rate-metric/), and Global Search Release Planning issues for the [18.1](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/239), [18.2](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/241), and [18.3](https://gitlab.com/gitlab-org/search-team/team-tasks/-/issues/245) milestones.\n\n## What's next\n\nEmbedded views are just the start of [Knowledge Group's](https://about.gitlab.com/direction/plan/knowledge/) vision for work tracking. Learn more about what we're focusing on next in the [embedded views post-GA epic](https://gitlab.com/groups/gitlab-org/-/epics/15249). As embedded views evolve we're committed to making them even more powerful and [accessible](https://gitlab.com/gitlab-org/gitlab/-/issues/548722).\n\n## Share your experience\n\nShare your feedback in the [embedded views GA feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/509792) or via the [embedded views GA survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_6PFhgZMBA06kr7E). 
Whether you've discovered innovative use cases, encountered challenges, or have ideas for improvements, we want to hear from you.\n","agile-planning",[744,495,9],{"featured":6,"template":699,"slug":1509},"embedded-views-the-future-of-work-tracking-in-gitlab","content:en-us:blog:embedded-views-the-future-of-work-tracking-in-gitlab.yml","Embedded Views The Future Of Work Tracking In Gitlab","en-us/blog/embedded-views-the-future-of-work-tracking-in-gitlab.yml","en-us/blog/embedded-views-the-future-of-work-tracking-in-gitlab",{"_path":1515,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1516,"content":1521,"config":1527,"_id":1529,"_type":13,"title":1530,"_source":15,"_file":1531,"_stem":1532,"_extension":18},"/en-us/blog/engineering-managers-automate-their-jobs",{"title":1517,"description":1518,"ogTitle":1517,"ogDescription":1518,"noIndex":6,"ogImage":1086,"ogUrl":1519,"ogSiteName":685,"ogType":686,"canonicalUrls":1519,"schema":1520},"How GitLab automates engineering management","At GitLab we know automation is engineering's best friend. Here's a deep\ndive into three scripts we use regularly to keep big projects on track.","https://about.gitlab.com/blog/engineering-managers-automate-their-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab automates engineering management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Seth Berger\"}],\n        \"datePublished\": \"2021-11-16\",\n      }",{"title":1517,"description":1518,"authors":1522,"heroImage":1086,"date":1524,"body":1525,"category":300,"tags":1526},[1523],"Seth Berger","2021-11-16","As an engineer, figuring out how to automate your work becomes an important\naspect of your job. From writing powerful dotfiles, to customizing bash\nscripts, to writing robust and rigorous tests, engineers regularly look for\nways to automate their repetitive work. 
\n\n\nAt GitLab, engineering managers are no different and are constantly looking\nfor ways to automate their work. I asked engineering managers at GitLab to\nshare their automation scripts and their responses were overflowing. \n\n\nFrom automating their [1:1 document\ncreation](https://www.youtube.com/watch?v=gqFbZi8Hyoc), to integrating\n[GitLab with Google Sheets](https://gitlab.com/-/snippets/2200407), to\nwriting utilities to [provide executive\nsummaries](https://gitlab.com/gitlab-org/secure/tools/report-scripts),\nGitLab team members take advantage of the [rich API that\nGitLab](https://docs.gitlab.com/ee/api/) provides to organize the mountains\nof information that they sort through on a regular basis. \n\n\nFor this blog post, I’m sharing a\n[repo](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries) that\ncontains just a few of the many scripts that our team members use. These\nscripts were originally written by engineering manager [Rachel\nNienaber](/company/team/#rnienaber). Rachel’s Infrastructure team is tasked\nwith the exciting work of coordinating large scale infrastructure and code\nimprovements. The work involves coordinating and sequencing lots of issues\nand epics, and ensuring the work gets done at just the right time and in the\nright order. Because of the breadth and scale of the work, she has created a\nhandful of scripts that parse issues and epics in order to gain better\nvisibility into the work that needs to be done. \n\n\nIn the repo, there are three scripts. I’ll provide a quick overview of the\nfirst two, and then dive into the code on the last one. 
\n\n\n* [Issues not in epics\n](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/issues_not_in_epics.rb)\n\n* [Epic\nsummary](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_summary.rb)\n\n* [Epic/Issue relationship\n](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_issue_relationships.rb)\n\n\n**Issues not in epics**\n\n\nSince the Infrastructure team leans on\n[epics](https://docs.gitlab.com/ee/user/group/epics/) to organize their\nissues, they also want to be able to organize work that may not be part of\nan epic. The\n[`issues_not_in_epics.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/issues_not_in_epics.rb)\nscript iterates through issues not in an epic and updates the description of\na single hard-coded\n[issue](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/538)\nwith a table summarizing those issues. The script is run on a daily basis\nvia a scheduled pipeline. This ensures that issues do not slip through the\ncracks. \n\n\n**Epic summary**\n\n\nThis script,\n[`epic_summary.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_summary.rb),\nwas written to solve the problem of having to look in multiple places to\nunderstand the status of each project. By grouping all status information\ninto one place it’s easy to see what the team is working on, and what\nprojects will be coming up next. 
\n\n\nAs input it takes a designated epic ID and updates the description of that\nepic by crawling sub-epics and extracting the following data from those\nepics:\n\n\n* The person responsible for delivering a sub-epic (at GitLab we use the\nterm [Directly Responsible Individual or\nDRI](/handbook/people-group/directly-responsible-individuals/))\n\n* The latest status update for the epic as inputted by an engineer in an\nepic description\n\n* The number of sub-epics\n\n* Links to a board showing the issues constituting that epic\n\n\nYou can see an example of the output from the script on this\n[epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/148).\n\n\nPart of what makes this script simple is that the Infrastructure team always\nupdates the bottom of all their epic descriptions with the following\nmarkdown.\n\n\n```markdown\n\n## Status {DATE}\n\n{commentary of the status}\n\n```\n\n\nBy consistently using that very simple markdown, the following snippet of\ncode can reliably extract the status for each epic:\n\n\n```rb\n if description!= nil && description.index(\"## Status\")\n\n    end_location = description.length\n\n    if description.index(\"mermaid\")\n      end_location = description.index(\"mermaid\")-6\n    end\n\n    status = description[description.index(\"## Status\")+10..end_location]\n  end\n```\n\n\nThe code above certainly won’t win any algorithm challenges, but that’s kind\nof the point and what we aim to do with [boring\nsolutions](/blog/boring-solutions-faster-iteration/). \n\n\nYou’ll notice the code above adjusts what is parsed to exclude a mermaid\ndiagram that might appear after the `## Status` markdown.  That diagram gets\nmaintained with the\n[epic_issue_relationship.rb](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/master/epic_issue_relationships.rb)\nscript. 
\n\n\n**Epic issue relationship**\n\n\nThis script updates either a specific epic or all epics, depending on the\ncommand line option,  with a [mermaid\ndiagram](https://mermaid-js.github.io/) that shows the relationship between\nissues and the order that those issues need to be completed by examining how\nthey are related to one another. Adding a mermaid diagram to the description\nwas introduced by [Sean McGivern](/company/team/#smcgivern), a staff\nengineer on the Scalability team. It creates brilliant diagrams like this\none from this\n[epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/579).\n\n\n![Mermaid\nDiagram](https://about.gitlab.com/images/blogimages/2021-11-16-engineering-managers/issue_relation.png)\n\n\nLet’s walk through the code.\n\n\nThe script uses the Docopt gem to parse and accept several input\nparameters. \n\n\n```rb\n\noptions = Docopt::docopt(docstring)\n\ntoken = options.fetch('--token')\n\ngroup_id = options.fetch('--groupid')\n\nepic_id = options.fetch('--epicid', nil)\n\ndry_run = options.fetch('--dry-run', false)\n\n```\n\nThen a connection to the GitLab instance is created, taking advantage of the\n[GitLab gem](https://github.com/NARKOZ/gitlab) which is extended in\n[`lib/gitlab_client/epics.rb`](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/main/lib/gitlab_client/epics.rb)\nto include a few extra methods. \n\n\n```rb\n\nGitlab.configure do |config|\n  config.endpoint = 'https://gitlab.com/api/v4'\n  config.private_token = token\nend\n\n```\n\n\nIf an epic id is passed in, then the `update_mermaid` will run only for a\nspecific epic. Otherwise, the code searches for epics that match the two\nlabels, `workflow-infra::In Progress` and `team::Scalability` and are also\n`opened`. Only when the matching epics do not have child epics,  is\n`update_mermaid` run. 
\n\n\n```rb\n\nif epic_id\n  update_mermaid(token: token, group_id: group_id, epic_id: epic_id, dry_run: dry_run)\nelse\n  Gitlab.epics(group_id, 'workflow-infra::In Progress,team::Scalability', options: { state: 'opened' }).each do |epic|\n    if Gitlab.epic_epics(epic['group_id'], epic['iid']).count == 0\n      update_mermaid(token: token, group_id: group_id, epic_id: epic['iid'], dry_run: dry_run)\n    end\n  end\nend\n\n```\n\nFinally the most exciting part of the script is the `update_mermaid`\nmethod. \n\n\nBelow the code sets up variables, and looks to see if a mermaid diagram\nexists in the epic description that it should populate. Note, that if a\nmermaid diagram does not exist in the epic already, this script will not\ncreate one. Each epic should already have a mermaid diagram placeholder\ninserted after the status header.\n\n\n```rb\n\ndef update_mermaid(token:, group_id:, epic_id:, dry_run:)\n  in_epic = Set.new\n  from_relations = Set.new\n  relations = Set.new\n  mermaid = ['graph TD']\n  original_description = Gitlab.epic(group_id, epic_id).description\n\n  unless original_description =~ MERMAID_REGEX\n    puts \"#{epic_id} does not have a Mermaid diagram\"\n    return\n  end\n```\n\n\nNext the code iterates through each of the issues in the epic and assigns a\ngraph_id for each issue that will be part of the mermaid diagram. It also\nadds the `key_fields` to the `in_epic` Set. The code assigns `title` along\nwith an emoji so that the mermaid diagram is visually richer. After that the\ngraph nodes are added to the mermaid diagram. 
\n\n\n```rb\n Gitlab.epic_issues(group_id, epic_id).each do |issue|\n    iid = issue['iid']\n    graph_id = id(issue)\n\n    in_epic \u003C\u003C key_fields(issue)\n\n    title = \"##{iid}\"\n    title = \"🎯 #{title}\" if issue['labels'].include?('exit criterion')\n    if issue['state'] == 'closed'\n      title = \"✅ #{title}\"\n    elsif issue['assignees'].any?\n      title = \"⏳ #{title}\"\n    end\n\n    mermaid \u003C\u003C \"  #{graph_id}[\\\"#{title}\\\"]\"\n    mermaid \u003C\u003C \"  click #{graph_id} \\\"#{issue['web_url']}\\\" \\\"#{issue['title'].gsub('\"', \"'\")}\\\"\"\n\n```\n\nAfter adding the graph nodes above, the code iterates through the links\nassociated with each issue. The code determines if the issue is blocked by\nor blocks another issue. Knowing the direction of this relationship defines\nwhich direction the arrow in the mermaid diagram should point.  \n\n\nThe code also adds both the issue and link to the `from_relations` set,\nwhich will automatically deduplicate entries.\n\n\n```rb\n    Gitlab.issue_links(issue['project_id'], issue['iid']).each do |link|\n      case link['link_type']\n      when 'is_blocked_by'\n        source = id(link)\n        destination = graph_id\n      when 'blocks'\n        source = graph_id\n        destination = id(link)\n      else\n        next\n      end\n\n      from_relations \u003C\u003C key_fields(issue)\n      from_relations \u003C\u003C key_fields(link)\n\n      unless relations.include?([source, destination])\n        mermaid \u003C\u003C \"  #{source} --> #{destination}\"\n        relations \u003C\u003C [source, destination]\n      end\n    end\n```\n\n\nFinally, the code looks at the “extra” issues, which are issues that are not\ndirectly part of the epic, but are related to issues in the epic. These are\nthe most important issues to ensure are on the diagram, since they represent\nissue dependencies that are outside the epic and would otherwise not show up\nwhen viewing an epic page in GitLab. 
\n\n\nThe code then updates the epic description by calling the GitLab API and\nsetting the new description. \n\n\n```rb\n  (from_relations - in_epic).each do |extra_issue|\n    mermaid \u003C\u003C \"  #{id(extra_issue)}[\\\"❌ ##{extra_issue['iid']}\\\"]\"\n    mermaid \u003C\u003C \"  click #{id(extra_issue)} \\\"#{extra_issue['web_url']}\\\" \\\"#{extra_issue['title'].gsub('\"', \"'\")}\\\"\"\n  end\n\n  mermaid_string = mermaid.join(\"\\n\")\n  new_description = original_description\n                        .gsub(MERMAID_REGEX,\n                              \"\\n\\\\1\\n```mermaid\\n#{mermaid_string}\\n```\\n\")\n\n    Gitlab.edit_epic(group_id, epic_id, description: new_description)\nend\n\n```\n\n\nThe above scripts help engineering managers efficiently know about all the\nissues their team members are working on, the status of their team’s epics\nand how all the work fits together.  \n\n\nThe scripts only rely on team members doing two things manually: \n\n\n* Updating an epic’s status on a periodic basis\n\n* Creating relationships between related issues.  
\n\n\nThe scripts can be run as part of a regular scheduled\n[pipeline](https://gitlab.com/gitlab-org/secure/tools/epic-issue-summaries/-/blob/main/.gitlab-ci.yml).\nWith the reports generated on a scheduled basis, engineering managers can\nregularly get summarized information that helps make them and their teams\nmore productive.",[9,790,696],{"slug":1528,"featured":6,"template":699},"engineering-managers-automate-their-jobs","content:en-us:blog:engineering-managers-automate-their-jobs.yml","Engineering Managers Automate Their Jobs","en-us/blog/engineering-managers-automate-their-jobs.yml","en-us/blog/engineering-managers-automate-their-jobs",{"_path":1534,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1535,"content":1541,"config":1547,"_id":1549,"_type":13,"title":1550,"_source":15,"_file":1551,"_stem":1552,"_extension":18},"/en-us/blog/epics-roadmap",{"title":1536,"description":1537,"ogTitle":1536,"ogDescription":1537,"noIndex":6,"ogImage":1538,"ogUrl":1539,"ogSiteName":685,"ogType":686,"canonicalUrls":1539,"schema":1540},"Coming in 11.3: Seamless planning with epics & roadmap","See how you can plan and track larger initiatives even more easily with milestone dates integrated into epics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672146/Blog/Hero%20Images/epics-issues-milestones-planning.jpg","https://about.gitlab.com/blog/epics-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Coming in 11.3: Seamless top-down and bottom-up planning with epics and roadmap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2018-08-23\",\n      }",{"title":1542,"description":1537,"authors":1543,"heroImage":1538,"date":1544,"body":1545,"category":300,"tags":1546},"Coming in 11.3: Seamless top-down and bottom-up planning with epics and 
roadmap",[739],"2018-08-23","\n\n[Epics](https://docs.gitlab.com/ee/user/group/epics/) and [roadmap](https://docs.gitlab.com/ee/user/group/roadmap/)\n are two newer features in [GitLab Ultimate](/pricing/) and [GitLab.com Gold](/pricing/#gitlab-com). Used together, your team\n can plan and track larger initiatives. On September 22, we're shipping a new feature\n which will help you transition seamlessly between top-down and bottom-up planning.\n\n## First things first: epics vs. issues vs. roadmap\n\nAn epic is similar to an [issue](https://docs.gitlab.com/ee/user/project/issues/) in that it\nrecords a proposed scope of work to be done, allows for team members to discuss that scope,\nand then is tracked and updated over time as that work is actually implemented.\n\nHowever, an epic exists at the [group](https://docs.gitlab.com/ee/user/group/index.html) level (as opposed to an issue, which exists at the [project](https://docs.gitlab.com/ee/user/project/index.html) level). So\nimmediately you see that an epic is designed to reflect a larger scope, and higher level of discussion\ncompared to an issue. Additionally, you can [attach any number of issues to an epic](https://docs.gitlab.com/ee/api/epic_issues.html#assign-an-issue-to-the-epic), with the idea that\nthe epic's scope decomposes into those individual issues.\n\n![epic](https://about.gitlab.com/images/blogimages/epic-view.png){: .shadow.medium.center}\n\nSince an epic is designed to scope work over a longer period of time (several issues' worth),\na timeline-based view in the form of a [roadmap](https://docs.gitlab.com/ee/user/group/roadmap/)\n is also useful: it serves as a visualization to anticipate that work, and track it as it's\n progressively completed. 
So the roadmap, also scoped at the group level, presents all the\n epics in time for that group.\n\nYou can apply [group labels](https://docs.gitlab.com/ee/user/project/labels.html#project-labels-and-group-labels)\n to epics, making it easy to quickly narrow down to the epics you care about, whether you\n are looking at a list view or a roadmap view.\n\n| Epics list | Roadmap |\n| --- | --- |\n| ![roadmap](https://about.gitlab.com/images/blogimages/epic-list-view.png){: .shadow} | ![roadmap](https://about.gitlab.com/images/blogimages/roadmap-view.png){: .shadow} |\n\n## Long-term vs short-term planning\n\nWhen planning any initiative, uncertainty, by definition, increases further out in\nthe future. You don't know how many resources you will have. You don't know if previous\ndependent work will be finished. You don't know if the market and your customers will change\nsuch that you won't even need that planned out initiative at all.\n\nConversely, the nearer-term future is much more certain. You have a good handle of the work\nthat should be accomplished and that it can be completed within the next few weeks, up to a\nmonth or so.\n\nAnd of course, the work you are doing now, and have already completed in the past, has zero\nuncertainty. You can't change the past.\n\nEpics and roadmap help you plan and track work in all these cases:\n\n### Long-term future: top-down planning\n\nWhen planning far in the future, we use _top-down planning_. We have strategic initiatives\nthat we want to achieve, with approximate scope and timelines. So in this case, you would\ncreate an epic, and assign `Fixed` dates (a planned start date and planned finish date) to it.\nThe epic would appear in the roadmap view, and you would be able to see it positioned further\nin the future.\n\nThis helps high-level planning, such as starting discussions with various departments in\nyour organizations, or presenting a strategic roadmap to your executive leadership. 
By creating the\nepic early on, it provides a collaborative space for all stakeholders to discuss feasibility\nand further detailed ideas.\n\n### Short-term future: bottom-up planning\n\nWhen planning for the nearer-term future, we use _bottom-up_ planning. So suppose the epic\nyou created previously with fixed dates has gained some traction within your organization.\nPeople are excited about the prospects and want to flesh out detailed designs and implementation\nsteps. You and your team would then start creating issues and attach them to the epic.\n\nEventually, you have scoped out the detailed work in the issues and even assigned milestones to them,\nindicating when they are planned to be worked on. Now, instead of having to manually update the epic\nto reflect the milestone dates, you would simply choose `From milestones` in the epic sidebar. In this\ncase, the epic planned start date becomes a dynamic date reflecting the earliest start date across all\nthe epic's assigned milestones. The same goes for the epic's planned end date too.\n\nThis functionality is coming in GitLab 11.3 – you can [view the original issue here](https://gitlab.com/gitlab-org/gitlab-ee/issues/6470).\n\nAdditionally, the [roadmap bar edges will reflect the fixed or dynamic start and end dates](https://gitlab.com/gitlab-org/gitlab-ee/issues/6471) accordingly.\n\n![inherited-dates](https://about.gitlab.com/images/blogimages/inherited-dates.png){: .shadow.medium.center}\n\nSo with this design, you are in control when you want to seamlessly transition an epic from a\ntop-down planning scenario, to a bottom-up one. 
The roadmap reflects these dates automatically too,\nso that all your epics are shown together in one view.\n\nPhoto by [Christopher Machicoane-Hurtaud](https://unsplash.com/photos/ewZkOqjl2Ys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/ewZkOqjl2Ys?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[744,834,790,9],{"slug":1548,"featured":6,"template":699},"epics-roadmap","content:en-us:blog:epics-roadmap.yml","Epics Roadmap","en-us/blog/epics-roadmap.yml","en-us/blog/epics-roadmap",{"_path":1554,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1555,"content":1561,"config":1566,"_id":1568,"_type":13,"title":1569,"_source":15,"_file":1570,"_stem":1571,"_extension":18},"/en-us/blog/epics-three-features-accelerate-your-workflow",{"title":1556,"description":1557,"ogTitle":1556,"ogDescription":1557,"noIndex":6,"ogImage":1558,"ogUrl":1559,"ogSiteName":685,"ogType":686,"canonicalUrls":1559,"schema":1560},"3 Major improvements coming to GitLab Epics","Explore three new features of GitLab Epics to enhance your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671891/Blog/Hero%20Images/epicsimprovements.jpg","https://about.gitlab.com/blog/epics-three-features-accelerate-your-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Major improvements coming to GitLab Epics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":1556,"description":1557,"authors":1562,"heroImage":1558,"date":1563,"body":1564,"category":1014,"tags":1565},[852],"2020-01-21","\n[Epics](https://docs.gitlab.com/ee/user/group/epics) allow teams to organize work into a useful collection of [issues](https://docs.gitlab.com/ee/user/project/issues), enabling proper planning to hit larger targets or function 
features. The [Plan:Portfolio Management team](https://handbook.gitlab.com/handbook/engineering/development/dev/plan/product-planning/) has been tinkering away to improve GitLab Epics. \n\n## How planning with epics provides insight  \n\nEpics group themed items to work at different levels, depending on the individual working on the project. Product managers, engineers, and leadership all require different information from projects, and epics enable teams to stay connected without getting lost in a sea of issues.\n\n> “Epics are collections of work that move teams towards a goal and can be business or engineer focused. Operations teams might need to take a closer look at meeting customer needs, solving a problem, or rolling out a feature. An engineer might care about the very specific pieces of work they need to do to accomplish something, to release the next segment, or to put up the next MVC. Epics allow you to organize work, providing visibility at the right level. Different teams are all connected and related with each other, so we're able to stay lockstep in our proper zones.” – [Keanon O'Keefe](/company/team/#kokeefe), Senior Product Manager\n\nEpics break down information silos, since team members can quickly access information across a project. Without epics, teams may have to trudge through thousands of issues to identify relevant data. Cleanly sorting issues into epics results in an increase in visibility and a reduction in cycle time.\n\n## Three improvements \n\nWhen we [released GitLab Epics in 11.3](/blog/epics-roadmap/), our [MVC](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) offered simple functionality and a roadmap. In recent months, we’ve started to iterate on those original MVCs to provide real value versus function. We're working on three significant iterations on epics to help teams enhance their workflow.\n\n### 1. 
Create issues from epics\n\nUsers can now create issues directly from epics, removing the step of navigating to a new tab before switching back to relate the issue to the epic. \n\n![A screenshot of a webpage that shows a button called create new issue](https://about.gitlab.com/images/blogimages/createanissuefromepic.png){: .shadow.medium.center}\n\nThis improvement allows users to create a new issue directly from an epic.\n{: .note.text-center}\n\nWith this improvement, users can create issues that correspond to a given epic, enabling them to break down a project into more digestible vertical feature slices.\n\n#### A quality of life improvement \n\nThe Plan:Portfolio team received user feedback that it was frustrating to manually create and relate issues in separate tabs. These additional steps disrupted workflow and didn’t meet expectations on how to efficiently iterate on a roadmap. With this quality of life improvement, users can now quickly iterate on the issues that comprise the larger epic.\n\nBy automatically relating issues to epics, users will no longer “lose” issues if they forget to manually relate them. This feature saves teams time and simplifies organization and planning.\n\n_Follow our progress by visiting [the issue](https://gitlab.com/gitlab-org/gitlab/issues/5419)_\n\n### 2. Weight and progress on roadmaps\n\nWhen viewing a roadmap, users can quickly understand the completion progress of each item. This information helps teams determine whether an epic is on track to be completed by the assigned end date.\n\nIf issues aren't groomed regularly, especially when it comes to issue weights, it can be difficult to trust any progress completion data on a roadmap, because it may not consider the historical velocity of the team implementing it. 
With this improvement, teams can quickly retrieve accurate, reliable data.\n\n#### An increase in visibility\n\nThis feature gives teams the ability to look at the roadmap and immediately recognize whether a project is behind or ahead of schedule. Previously, users had to manually calculate progress and estimate whether goals were attainable. This new view helps teams report progress outwards, allocate additional support to lagging items, and gain better visibility into the progression towards goals.\n\n![A screenshot of a webpage that shows projects and the corresponding progress and due date](https://about.gitlab.com/images/blogimages/weightandprogressepics.png){: .shadow.medium.center}\n\nThe new view will allow users to see quickly whether a project is on track.\n{: .note.text-center}\n\n> “Team members can self-serve information. And from a product manager’s perspective, especially when you look at a company like ours in which we’re all remote, if I need an update that’s not documented in an issue, I might have to wait until one of my engineers in APAC comes online to answer the question. If everything is demonstrated on a roadmap from a progress complete perspective, that can help me answer questions to customers or upper management on where things are at, without having to wait for a response from somebody else.”  – Keanon O'Keefe\n\nThe idea is that it saves time, because a manager, director, or executive who looks at this roadmap has instant insight into progress. They don't have to ping the engineering manager who may then need to ping the developers. We’ve reduced the necessary steps in communicating progress and the ability to share information.\n\n**Tip**: This feature depends on leveraging [the weight option](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html). 
Keanon recommends weighting every issue assigned to an epic to provide the most accurate representation of the amount of work associated and your progress.\n\n_Follow our progress by visiting [the issue](https://gitlab.com/gitlab-org/gitlab/issues/5164)_\n\n### 3. Expand epics on roadmaps to view hierarchy\n\nChild epics are now displayed under their parent epics on the roadmap, enabling users to view the hierarchy of work. With this feature, users can get a detailed view of the work involved over a timeline. Because users can view hierarchy, they’re able to quickly see related work to track progress and determine whether an overall project will meet a goal. \n\n![A screenshot of a webpage that shows a parent epic and the child epics, with corresponding progress and due dates](https://about.gitlab.com/images/blogimages/expandepicsinroadmap.png){: .shadow.medium.center}\n\nThis feature gives users a detailed view of work involved over the project timeline.\n{: .note.text-center}\n\n> “What this change will do is allow you to look at a parent epic and actually click on it so that it expands kind of like a drawer. It will show the parent epic, which might be something that someone at an executive or director level cares about, while individual teams or engineering managers can view more granular details with child epics.” – Keanon O'Keefe\n\nThis improvement complements weights and progress on roadmaps, since it provides even more ways to view data and details. \n\n#### An ordered approach\n\nFrom an organizational standpoint, this iteration enables teams to properly sequence their plans. For example, teams that need to build a section of the backend before starting work on the frontend can quickly determine whether they’re on track to meet timelines or how much more work needs to be completed before another team can begin their work. 
This is our first step in empowering teams to sequence their work, and it’s an opportunity to allow teams to tailor the view of the roadmap to the audience.\n\n“We have a great set of designers, engineering managers, developers, and product people in Plan. I'm very impressed at how quickly everybody rallied together to move things forward. Many of these iterations are difficult from a design perspective, and I’m impressed with how the team approaches challenges and determines how to get the right data and visual at the right level, while allowing teams to drill down or drill up easily.” – Keanon O'Keefe\n\n**Tip**: It’s important to assign start and due dates to epics, whether you leverage the dates that are inherited or ones you set manually, so that child epics are properly displayed on the roadmap in relation to their parent.\n\n_Follow our progress by visiting [the issue](https://gitlab.com/gitlab-org/gitlab/issues/7077)_\n\n### What’s next for GitLab Epics?\n\nThe team is just getting started with its improvements to GitLab Epics. This is an area that we’re committed to accelerating, and we want to offer the most effective tools for product and portfolio management. “We’ve built out a strong team, and we’re hitting our stride and reaching our potential for releasing functionality,” says Keanon. \n\nOne of the next opportunities the Plan:Portfolio Management team will undertake is collaborating with Plan:Certify on [dependency mapping](https://gitlab.com/gitlab-org/gitlab/issues/2035), which will enable teams to surface dependencies on the roadmap. With dependency mapping, development and operations teams can view a sequence of work, identify blockers, and understand whether they’ll meet target deadlines. This feature will help larger organizations plan multiple projects simultaneously, see dependencies, and identify a critical path of work. \n\n> “We are just getting started with delivering tools that offer functionality between different groups. 
We have some exciting features coming soon.” – Keanon O'Keefe\n\nIf you’re interested in learning more about the team’s direction, please read more about the vision for Epics.\n\n_Thank you to [Keanon O'Keefe](/company/team/#kokeefe), Senior Product Manager, Plan:Portfolio Management, for contributing to this post._\n\nCover image by [Maarten van den Heuvel ](https://unsplash.com/@mvdheuvel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/8EzNkvLQosk)\n{: .note}\n",[744,696,9],{"slug":1567,"featured":6,"template":699},"epics-three-features-accelerate-your-workflow","content:en-us:blog:epics-three-features-accelerate-your-workflow.yml","Epics Three Features Accelerate Your Workflow","en-us/blog/epics-three-features-accelerate-your-workflow.yml","en-us/blog/epics-three-features-accelerate-your-workflow",{"_path":1573,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1574,"content":1580,"config":1586,"_id":1588,"_type":13,"title":1589,"_source":15,"_file":1590,"_stem":1591,"_extension":18},"/en-us/blog/freedesktop-org-migrates-to-gitlab",{"title":1575,"description":1576,"ogTitle":1575,"ogDescription":1576,"noIndex":6,"ogImage":1577,"ogUrl":1578,"ogSiteName":685,"ogType":686,"canonicalUrls":1578,"schema":1579},"Welcome to GitLab, freedesktop.org!","Freedesktop.org, the home of open source desktop technology development, has migrated to GitLab to improve their workflow and modernize their service.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671252/Blog/Hero%20Images/gitlab-desktop-org-cover.png","https://about.gitlab.com/blog/freedesktop-org-migrates-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Welcome to GitLab, freedesktop.org!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2018-08-20\",\n      
}",{"title":1575,"description":1576,"authors":1581,"heroImage":1577,"date":1582,"body":1583,"category":1584,"tags":1585},[1133],"2018-08-20","\nSorry to [keep banging on about it](/blog/drupal-moves-to-gitlab/), but we get pretty excited when [open source projects](/blog/welcome-gnome-to-gitlab/) tell us they’re [#movingtogitlab](/blog/movingtogitlab/). There’s always more room at our inn. So we’re very happy to welcome [freedesktop.org](https://www.freedesktop.org/wiki/) into the fold! We chatted to Daniel Stone, project administrator, about what the project does and why they’re joining us.\n\n## Q & A\n\n- [What is freedesktop.org?](#what-is-freedesktoporg)\n- [How is freedesktop.org used?](#how-is-fdo-used)\n- [What's the connection between freedesktop.org, X Window System, and Linux?](#whats-the-connection-between-fdo-x-window-system-and-linux)\n- [How many contributors work on the project?](#how-many-contributors-work-on-the-project)\n- [Why would someone use freedesktop.org instead of macOS or Microsoft Windows?](#why-would-someone-use-fdo-instead-of-macos-or-microsoft-windows)\n- [Why are you migrating to GitLab?](#why-are-you-migrating-to-gitlab)\n- [How are you anticipating the move to be beneficial?](#how-are-you-anticipating-the-move-to-be-beneficial)\n\n### What is freedesktop.org?\n\nCreated in 2000 by Havoc Pennington (a GNOME developer), freedesktop.org (or fd.o) is a [forge](https://en.wikipedia.org/wiki/Forge_(software))-type hosting site. The idea was to create a neutral collaboration space between [GNOME](/blog/welcome-gnome-to-gitlab/), [KDE](/blog/welcome-kde/), Enlightenment, and other open source desktops. 
Unlike integrated systems, like Windows and macOS, the open source desktop lacks a lot of shared foundations: what should you open files with, how should you manage windows, and so forth.\n\nOriginally fd.o was a home for these desktop developers to collaborate on common standards, so programs could run portably with the same functionality across different desktops. In 2004, xwin.org was formed by a group of open source graphics developers unhappy with the closed-shop state of the XFree86 project. The two projects of fd.o and xwin.org merged shortly after xwin.org’s founding, with fd.o playing host to the X.Org Foundation, which supervises and facilitates the ongoing development of the graphics stack.\n\nOver the years since, our role as a neutral home for all sorts of desktop technology development has seen us add projects such as GStreamer, LibreOffice, and PulseAudio to our diverse family. Some projects such as systemd and Flatpak originally began their development on fd.o, but moved out to other hosting platforms which better suited their needs and workflow.\n\n### How is fd.o used?\n\nMost of our projects are invisible to users: NetworkManager is probably responsible for driving your Wi-Fi under the hood, though you’re unlikely to interact with it directly. Mesa and Wayland/X.Org will provide the underlying plumbing to render your games and your whole UI, but these are mostly invisible. Your desktop probably leans heavily on the D-Bus message-passing system. Most of it is plumbing.\n\n### What's the connection between fd.o, X Window System, and Linux?\n\nAs part of the graphics stack, fd.o hosts the development of the Linux kernel’s graphics development: drivers from all vendors part of the mainstream kernel (and some which aren’t yet!) use our Git hosting, mailing lists, bug tracking, and other services to build the core kernel graphics infrastructure. 
All this development happens on our infrastructure, which is then fed into the core Linux kernel during its \"merge window\" every release.\n\nThe X.Org Foundation tries to enable the work of a wide body of open source graphics projects. Originally X.Org itself was just the X Window System, but over the years the code evolved out of X.Org into a number of enabling projects. These include not just alternative window systems such as Wayland, the Mesa 3D graphics library for hardware-accelerated OpenGL, OpenGL ES and Vulkan, Cairo and Pixman for software rendering, libinput for input device handling, and much more. We play host to all those projects, with the Foundation providing an accountable body for administrative work, conference organization, and so on.\n\nOther freedesktop.org projects, as said before, provide all the glue around the margins of your desktop. Providing a database of available applications and preferred MIME type handlers, network device management, inter-process communication, a PDF renderer; in general, all the things we can do well in one place, to enable people who want to write desktop environments to focus on the thing that matters to them: building the actual desktop!\n\nAs part of this, we’ve always tried to stay strenuously vendor-neutral and also project-neutral within the desktop community. Rather than \"picking winners\" or enforcing directions on external projects, we try to slowly and gently build consensus as a neutral forum.\n\n### How many contributors work on the project?\n\nHard to say! We have around 1,300 registered users who directly commit to our family of projects. Not all of them are active of course, but many developers do not have direct commit access and aren’t represented in that figure. 
We have around 25,000 people subscribed to our various development mailing lists.\n\n### Why would someone use fd.o instead of macOS or Microsoft Windows?\n\nMuch like GitLab, freedesktop.org is an open source, open-participation, neutral platform. Running an open source desktop through distributions such as Arch, Debian, Fedora, or Ubuntu – all of which use our enabling technology – gives the user a fully open source system. This is incredibly empowering: as a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream.\n\n>As a user, you have the ability to dive into any part of your system, make the changes you want to see, and participate openly in these projects to see your improvements work upstream\n\n### Why are you migrating to GitLab?\n\nOver the years fd.o has been running, we’ve accumulated a wide variety of services: our LDAP-based account system forked back in 2004, Bugzilla for issue tracking, Mailman for mailing lists, cgit and hand-rolled Git hosting, Patchwork for pulling patches from the mailing list when they are submitted for review, Jenkins for build infrastructure, ikiwiki for project wikis, still an FTP server somewhere; the list goes on.\n\nIn terms of workflow, we simply can’t provide some of our projects the workflow they want with this infrastructure. Over the years since we begun, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users. 
We saw multiple projects either leave fd.o and move to alternate hosting platforms, or just not develop on our infrastructure to begin with, because we weren’t offering anything like the same level of functionality and convenience as those services.\n\n>Over the years, the norm of software development has moved from throwing patches around via email, to fully distributed version control with integrated review and issue tracking, and so on. On paper we provide those services, but integration between them involves a lot of duct tape, and this shows to the users.\n\nOne of the issues with freedesktop.org being such a diverse family, is that there is no central driven organization behind it. The site is currently run by three volunteers, all of whom keep the site running in our spare time. Maintaining all these services – many of them forked to add now-essential features like spam prevention, as well as our own custom local work for service integration – takes a surprising amount of time, to the point where just keeping it running is about all we can do. Actual improvements are very difficult to implement in the time we have, and even when we can do them, making sure all our projects can take full advantage of them is sometimes too much for us.\n\n### How are you anticipating the move to be beneficial?\n\nFirstly, for the workflow, having linked repository management, issue tracking, code review, CI pipelines and feedback, container repositories, wikis, and websites, provides functionality we couldn’t before – or at least, we were providing a pale imitation of it. 
As all of this is provided in [GitLab Core](/pricing/) and backed by a single coherent permission model, we are able to open these services up to our member projects who can work with them autonomously, rather than waiting for the admins to deal with services for them.\n\nFrom an admin point of view, having a single application which takes care of all of this will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now. Bringing GitLab up on Kubernetes has not been without its challenges as we attempt to bring our service administration skills up into the 21st century, but already it’s shown us that we can move drastically quicker than we have been able to in the past.\n\n>From an admin point of view, having a single application which takes care of our entire workflow will drastically reduce the time we spend treading water and dealing with the impedance mismatch between the disparate services we’ve had until now\n\nIn terms of service modernization, another huge improvement is a modern approach to identity and security. Running an open community site in 2018 is not a fun place to be: not just keeping on top of security vulnerabilities, but targeted break-in attempts and spam. A lot of our previous services aren’t designed to deal with this kind of abuse. Having a single identity service on GitLab – which can link to external identity providers such as Google and GitLab.com, and make use of two-factor authentication – is a huge leap forward for us. Similarly, a coherent approach to spam which doesn’t involve spending an evening trawling through SQL tables by hand makes dealing with spam actually practical!\f\n\n### How can people get involved?\n\nSince we are an umbrella of diverse projects, there's no single answer. 
We keep a [list of our active projects on our website](https://www.freedesktop.org/wiki/GettingInvolved/): pick the one that's closest to your heart, check out their site and repo, and send your first MR.\n","open-source",[721,268,1364,9],{"slug":1587,"featured":6,"template":699},"freedesktop-org-migrates-to-gitlab","content:en-us:blog:freedesktop-org-migrates-to-gitlab.yml","Freedesktop Org Migrates To Gitlab","en-us/blog/freedesktop-org-migrates-to-gitlab.yml","en-us/blog/freedesktop-org-migrates-to-gitlab",{"_path":1593,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1594,"content":1600,"config":1605,"_id":1607,"_type":13,"title":1608,"_source":15,"_file":1609,"_stem":1610,"_extension":18},"/en-us/blog/friends-dont-let-friends-add-options-to-code",{"title":1595,"description":1596,"ogTitle":1595,"ogDescription":1596,"noIndex":6,"ogImage":1597,"ogUrl":1598,"ogSiteName":685,"ogType":686,"canonicalUrls":1598,"schema":1599},"Friends don't let friends add options to code","Creating optional features burdens users and applications – here's how we avoid adding options.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678953/Blog/Hero%20Images/options.jpg","https://about.gitlab.com/blog/friends-dont-let-friends-add-options-to-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Friends don't let friends add options to code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-12-10\",\n      }",{"title":1595,"description":1596,"authors":1601,"heroImage":1597,"date":1602,"body":1603,"category":832,"tags":1604},[852],"2018-12-10","\nSometimes, when trying to make it easier to work in an application, our instinct is to add\noptional features that users can enable if their situations require a specific functionality.\nOur intentions may be good, but these actions can actually cause _more_ problems, since we invite 
users\n to second-guess their choices by adding extra steps into the user experience.\n\n## The disadvantages of a [choose your own adventure](https://en.wikipedia.org/wiki/Choose_Your_Own_Adventure) model\n\nOne of the most celebrated aspects of [open source](/solutions/open-source/)\nis the freedom that allows developers to brighten a user’s day by adding an\noptional feature that may not be for everyone, but allows a small portion of users\nto engage with a project in a specific way. While it may seem like a great idea\nto cater to individual needs, there are several disadvantages to making something\nan option.\n\n### It creates more work for developers\n\nCreating extra options means more work for both frontend and backend teams.\nThese features add additional code, tests, and documentation for each setting,\nand the various states alter the UI. Adding options hurts you in every step of\nthe development process.\n\n### It places a burden on the user to choose\n\nWhen we solve problems by including options, we force a user to think about the\nfunction and consider its purpose and drawbacks, placing a burden on them to\ncontrol how they use an application. A user hesitates and has to make a decision\nabout whether this is something that should be enabled. After all, if an option\nsignificantly enhanced the user experience, then wouldn’t it have been automatically\nintegrated?\n\n### It makes future functionality more difficult to implement\n\nThere's also the long-term impact of additional options. Just one extra option can lead to one of two\npaths, which might influence other parts of an application. So, every\ntime we add an option, the number of states of the application doubles. That's\nexponential growth and it adds up quickly, making it harder to diagnose errors. 
Multiple\noptions can lead to the creation of states of which we’re unaware, so\nit’s harder for the user to understand how an application should behave, because\nthey don't know whether errors are due to an option or not. And, if it is an\noption causing the error, _which_ option is the problem?\n\n## How we avoid adding options: Bask in the glow of iteration\n\nSo, how do you know if a feature should be optional or not? At GitLab, we ship\nthe first [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and keep delivering based on\nuser feedback. Some of the features that we anticipated may never roll out,\nbecause users didn’t request them. Iteration allows us to reduce the scope of\ndevelopment and avoid including features that aren’t popular or useable.\n\nWhenever users need something new, try to create a solution that's acceptable\nfor the most number of people. Rely on your development and operations teams to\nprovide feedback and ask them to relate to the end user. Conducting\n[UX research](/handbook/product/ux/ux-research/#ux-research) with your users\nalso helps identify pain points and needs.\n\nTeams are continually constrained by development capacity, and adding options to\napplications can absorb previous time and effort. We suggest shipping your\napplication without an option and waiting to see whether people request it or\nmake a\n[feature proposal](https://gitlab.com/gitlab-org/gitlab-ce/issues?label_name%5B%5D=feature+proposal)\nfor it. 
In the end, our role is to solve users’ problems, and our goal is to\nidentify the underlying cause of a challenge and fix it in a way that doesn't\nneed an option.\n\n[Cover image](https://unsplash.com/photos/pKeF6Tt3c08) by [Brendan Church](https://unsplash.com/@bdchu614) on Unsplash\n{: .note}\n",[790,791,721,9],{"slug":1606,"featured":6,"template":699},"friends-dont-let-friends-add-options-to-code","content:en-us:blog:friends-dont-let-friends-add-options-to-code.yml","Friends Dont Let Friends Add Options To Code","en-us/blog/friends-dont-let-friends-add-options-to-code.yml","en-us/blog/friends-dont-let-friends-add-options-to-code",{"_path":1612,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1613,"content":1619,"config":1625,"_id":1627,"_type":13,"title":1628,"_source":15,"_file":1629,"_stem":1630,"_extension":18},"/en-us/blog/get-ready-for-new-gitlab-web-ide",{"title":1614,"description":1615,"ogTitle":1614,"ogDescription":1615,"noIndex":6,"ogImage":1616,"ogUrl":1617,"ogSiteName":685,"ogType":686,"canonicalUrls":1617,"schema":1618},"A first look at the new GitLab Web IDE and remote development experience","The next-generation GitLab Web IDE, available to everyone, will enable faster and more efficient contributions right from your browser.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682545/Blog/Hero%20Images/navin-beta-unsplash.jpg","https://about.gitlab.com/blog/get-ready-for-new-gitlab-web-ide","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A first look at the new GitLab Web IDE and remote development experience\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Schurter\"}],\n        \"datePublished\": \"2022-12-15\",\n      }",{"title":1614,"description":1615,"authors":1620,"heroImage":1616,"date":1622,"body":1623,"category":300,"tags":1624},[1621],"Eric Schurter","2022-12-15","\n\nA little while back I wrote about the [future of the 
GitLab Web IDE](/blog/the-future-of-the-gitlab-web-ide/) and our decision to [rebuild the Web IDE](https://gitlab.com/groups/gitlab-org/-/epics/7683) on top of the open source VS Code project. Our goal: To make it simple for anyone and everyone to contribute, regardless of their development experience. Today, I am happy to announce that we are preparing to launch the new Web IDE experience as a beta, **available to everyone, and enabled by default on GitLab.com.** \n\nDevelopers and non-developers alike need to be able to contribute from anywhere, across multiple projects, and without context switching or the need to manage a local development environment. The new Web IDE is more user-friendly and efficient, combining VS Code's powerful core features with significantly improved performance and the ability to securely connect to a remote development environment directly from the Web IDE.\n\n## Start using the Web IDE Beta December 19\n\nI know you're excited to try it. We've been using it internally and it's fantastic. If you use GitLab.com, expect to see the Web IDE Beta available on December 19, 2022. There's nothing else you have to do, nothing to install, and no configuration necessary. After the launch, the Web IDE Beta will be the default experience across GitLab.\n\n![Screenshot of welcome screen](https://about.gitlab.com/images/blogimages/web-ide-images/ide-welcome-screen.png){: .shadow}\n\n### Available in 15.7 for self-managed users\nFor self-managed users, you'll get the Web IDE Beta as part of the GitLab 15.7 release, which will be available December 22, 2022. It will be behind a [feature flag](https://docs.gitlab.com/ee/user/project/web_ide_beta/index.html#enable-the-web-ide-beta) that admins can enable on an instance-level. \n\n## What can you expect with the new Web IDE? 
\n\nThe Web IDE Beta introduces a number of new features and improvements over the previous Web IDE, including the following: \n\n- A flexible and customizable interface with collapsible panels and custom themes\n\n![Screenshot of Web IDE interface](https://about.gitlab.com/images/blogimages/web-ide-images/ide-interface.png){: .shadow}\n\n- Contextual actions and drag & drop support in the file panel\n\n![Screenshot of file panel](https://about.gitlab.com/images/blogimages/web-ide-images/ide-file-panel.png){: .shadow}\n\n- Find and replace across all open files\n\n![Screenshot of find and replace](https://about.gitlab.com/images/blogimages/web-ide-images/ide-find-replace.png){: .shadow}\n\n- An interactive document outline and visual history panel\n- Up to 80% reduction in memory usage over the previous Web IDE\n- Improved reliability of tracking changes to files and directories\n- Better support for touchscreen devices such as tablets and (larger) smartphones\n\nThere's much, much more included in the Web IDE Beta, as you'll soon find out. But there's one more big thing to mention... \n\n## Interactive terminal access with remote development\n\nLast but not least, the beta introduces an entirely new category to GitLab by making it possible to securely connect to a remote development environment, run commands in an interactive terminal panel, and get real-time feedback from right inside the Web IDE. See it in action in this short video: \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/q_xzzY9GT9c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nThis is the first feature released as part of our new Remote Development category, with much more planned in the near future. By connecting the Web IDE to a cloud runtime environment, you can unlock the full potential of a web-based IDE without having to manage your own local environment. 
More information about configuring your remote development environment can be found in our [documentation](https://docs.gitlab.com/ee/user/project/remote_development/) and more details about the Remote Development roadmap can be found on our [direction page](/direction/create/ide/remote_development/). Be on the lookout for a lot more about remote development in the coming months.\n\n## What about the previous Web IDE? \n\nThe Web IDE Beta is ready to handle many of the most frequently performed tasks you could tackle in the existing Web IDE, like committing changes to multiple files and reviewing merge request diffs, but in a much more powerful and familiar interface. We hope you'll enjoy working in the Web IDE Beta as much as we do. \n\nIf there's something missing, or for whatever reason you need to use the previous Web IDE experience, don't worry: We've included a [user preference](https://gitlab.com/-/profile/preferences) that allows you to switch back and forth between the two whenever you want. We'll keep both around until we're out of beta, something we have planned for GitLab 16.0 in May 2023, so you can maximize your efficiency while you adopt the new features. \n\n![Screenshot of user preference](https://about.gitlab.com/images/blogimages/web-ide-images/ide-user-preference.png){: .shadow}\n\n## What's next for the GitLab Web IDE? \n\nOf course, we're not done yet! We will be improving the features you see today and introducing some exciting new features before we come out of beta. We're working on adding support for [VS Code extensions](https://gitlab.com/groups/gitlab-org/-/epics/7685) and [enabling project-wide search](https://gitlab.com/groups/gitlab-org/-/epics/9466), but we are making this beta available to everyone because we want to hear from you. What's the most important missing piece for you? How can we make you more productive in the Web IDE? 
Let us know in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/385787) and we'll keep iterating!\n\nCover image by [Navin Rai](https://unsplash.com/@nicque) on [Unsplash](https://unsplash.com/photos/EgyIbEB7n8Y?utm_source=unsplash&utm_medium=referral&utm_content=creditShareLink)\n{: .note}\n",[834,1014,767,9],{"slug":1626,"featured":6,"template":699},"get-ready-for-new-gitlab-web-ide","content:en-us:blog:get-ready-for-new-gitlab-web-ide.yml","Get Ready For New Gitlab Web Ide","en-us/blog/get-ready-for-new-gitlab-web-ide.yml","en-us/blog/get-ready-for-new-gitlab-web-ide",{"_path":1632,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1633,"content":1639,"config":1644,"_id":1646,"_type":13,"title":1647,"_source":15,"_file":1648,"_stem":1649,"_extension":18},"/en-us/blog/get-started-compliance-as-code",{"title":1634,"description":1635,"ogTitle":1634,"ogDescription":1635,"noIndex":6,"ogImage":1636,"ogUrl":1637,"ogSiteName":685,"ogType":686,"canonicalUrls":1637,"schema":1638},"Why building compliance as code in DevOps will benefit your entire company","Read here on how to integrate compliance as code into your DevOps cycle and why it's important to have in your business","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680734/Blog/Hero%20Images/compliance-as-code-header.jpg","https://about.gitlab.com/blog/get-started-compliance-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why building compliance as code in DevOps will benefit your entire company\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-08-19\"\n      }",{"title":1634,"description":1635,"authors":1640,"heroImage":1636,"date":1641,"body":1642,"category":718,"tags":1643},[1423],"2019-08-19","\n\nCompliance, both regulatory and self-imposed, is another area where the shift-left\nmovement has taken hold. 
By building compliance into your workflow with compliance as code methods, your\nteam can save time while producing secure, low-risk code.\n\n## What is compliance as code?\n\nCompliance as code methods ensure that the correct regulatory or company\ncompliance requirements are fulfilled with zero-touch on the path to production.\nIt builds compliance into development and operations.\n\nThe utilization of compliance as code tools enables stakeholders to ensure that production processes are compliant by means of defining how resources must be configured. Such a structure often allows these tools to automatically adjust resources into a compliant state in order to meet these pre-defined compliance requirements.\n\nThis type of minimal-friction compliance is a crucial solution for large\nenterprises – especially those subject to complex regulation (such as enterprises\noperating in healthcare or financial services). By building compliance into the\n[DevOps lifecycle](/topics/devops/), you will streamline the workflow and save developers valuable\ntime during review and testing.\n\n## Benefits of compliance as code\n\nAdopting compliance as code brings a number of advantages and new operational capabilities. \n\n- **It’s easier to stay compliant during compliance rule change periods.** When a change happens in regulatory compliance frameworks, awareness and remediation of any issues happen more quickly because teams don’t have to manually overhaul processes or re-train.\n- **More natural alignment between developers and risk assessment teams.** There is more unity between teams when the compliance controls are already defined as code. It’s then possible to embed compliance rules into delivery processes and enable compliant delivery by default. \n- **A lot of time and money saved.** Automation cuts out costly and time-consuming manual work. When automated compliance as code is in place, there’s a reduced risk of costly fines and data breaches. 
\n- **It’s all scalable.** Adopting compliance as code means adopting consistency across teams and an organization, regardless of size. This consistency prevents ambiguity and bottlenecks in maintaining compliance. \n\n## Challenges of compliance as code\n\nDevOps means experiencing changes often and quickly, and despite the benefits that automated compliance as code brings, it can also be a challenge. It can sometimes be difficult for security to keep up with the speed of change.\n\nAnd sometimes, even automated compliance as code isn’t perfect. It’s important to remember that there’s no cap on how careful you should be when it comes to DevOps compliance. Despite having automation in place, a pair or two of human eyes open to keep watch is still useful – even if it means a possible increase in human error. \n\n## How to implement compliance as code\n\nAs [Jim Bird wrote for O’Reilly](https://www.oreilly.com/learning/compliance-as-code),\ncompliance as code policies must be defined up front, and will bring together\nmanagement, compliance, internal audit, PMO, and infosec leaders. This group\nwill work together to define rules and control workflows. Management also needs\nto understand how operational and other risks will be handled throughout the\npipeline.\n\nHow your company establishes compliance as code policies [will depend on how your team is structured](/topics/devops/build-a-devops-team/)\nbut regardless of how your teams interact, transparency is required. To ensure\nthat information is shared and decisions are made collaboratively, consider\nestablishing the following guidelines:\n\n- **Peer reviews**: The first review cycle for larger changes should be manual, to\nensure no changes are made without at least one other person verifying the\nchange. 
Reviewers can be assigned randomly to ensure the quality of review.\n- **Static application security testing**: [Static\n(or white box) testing](/blog/developer-intro-sast-dast/) should be done for every code change, in addition to\nmanual reviews.\n- **Subject matter expert reviews for high-risk code**: For code that the management team defines as\nhigh-risk (such as security code), changes should be reviewed by a subject matter\nexpert.\n- **Regulated access controls**: Management should keep access in check, both so that\nchanges aren’t made by a single engineer, and so that every change flows through\nthe workflow and can be reviewed by anyone with access to the dashboard.\n\n### Enhance technology with culture\n\nTechnology and processes will only work if your team cultures are aligned with your goal – and culture starts\nat the top. Team leaders should promote and exemplify a security-first\nmentality and openness to collaborative change. This will be a new way of\nthinking for some, but it will help teams adopt the shift-left trend, ultimately\nsaving everyone time and reducing business risk.\n\n### Compliance and open source\n\nIn 2015, [The Linux Foundation found that more than 60% of companies build products with open source software](https://www.linuxfoundation.org/blog/2015/06/why-companies-that-use-open-source-need-a-compliance-program/), but more\nthan half of those companies don’t have formal procedures in place to ensure their\nsoftware complies with open source licenses and regulations. Companies should\ncreate a free and open source software (FOSS) compliance program not only to\nabide by copyright notices and license obligations, but also to protect company\nIP and third-party source code from disclosure.\n\n## How we do compliance at GitLab\n\nWe [began our formalized compliance program](/blog/choosing-a-compliance-framework/)\ntowards the end of our Series C funding round, which was fairly early compared\nto other businesses of our size. 
The benefit of starting early was that we were\nable to implement security controls while we were still developing and evolving\nour operating processes, instead of retrofitting security to the business. The\nkey decision in our approach was choosing between independent or aggregate\nsecurity controls: We chose the aggregate route, leveraging [Adobe’s CCF](https://blogs.adobe.com/security/2017/05/open-source-ccf.html),\nrather than implementing industry frameworks individually. This allowed us to\nmitigate overlapping asks to GitLab teams, which enabled an agile and efficient\nprogram standup, and gave the compliance group internal credibility.\n\n## Compliance as code provides benefits across your ecosystem\n\nThere are benefits to everyone from the developer to the third-party auditor when compliance is baked into code from the beginning. These benefits include:\n- **Time saved**: Your\nteams will spend less time passing code fixes back and forth.\n- **Compliance transparency**: Management will\nunderstand where and how your software abides by compliance requirements.\n- **Routine reporting streamlines auditing**: Reports throughout the DevOps lifecycle provide documentation and proofs of\nrecord that will help management track and streamline any regulatory audit\nprocedures.\n\n## Common compliance as code tools\n\nGoogle Cloud Platform, Amazon Web Services, and Azure are all cloud services that can be used in compliance as code. And oftentimes, these tools are even more effective when paired with native tools. 
\n\nThrough proper tool adoption, the three core actions of a compliance strategy can be automated: prevention, detection, and remediation.\n\nCover image by [Hack Capital](https://unsplash.com/@hackcapital?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/code?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[696,1385,722,787,789,9],{"slug":1645,"featured":6,"template":699},"get-started-compliance-as-code","content:en-us:blog:get-started-compliance-as-code.yml","Get Started Compliance As Code","en-us/blog/get-started-compliance-as-code.yml","en-us/blog/get-started-compliance-as-code",{"_path":1651,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1652,"content":1658,"config":1664,"_id":1666,"_type":13,"title":1667,"_source":15,"_file":1668,"_stem":1669,"_extension":18},"/en-us/blog/getting-started-with-value-streams-dashboard",{"title":1653,"description":1654,"ogTitle":1653,"ogDescription":1654,"noIndex":6,"ogImage":1655,"ogUrl":1656,"ogSiteName":685,"ogType":686,"canonicalUrls":1656,"schema":1657},"Getting started with the new GitLab Value Streams Dashboard","Benchmark your value stream lifecycle, DORA, and vulnerabilities metrics to gain valuable insights and uncover patterns for continuous improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671793/Blog/Hero%20Images/16_0-cover-image.png","https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with the new GitLab Value Streams Dashboard\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-12\",\n      }",{"title":1653,"description":1654,"authors":1659,"heroImage":1655,"date":1661,"body":1662,"category":832,"tags":1663},[1660],"Haim 
Snir","2023-06-12","\n\n\u003Ci>This is part two of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management.\u003C/i>\n\nGetting started with GitLab [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html), a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for digital transformation improvements, is easy. If you're already using GitLab Value Stream Management, simply navigate to your project's or group's Analytics tab, and within [Value stream analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-value-stream-analytics), click on the \"Value Streams Dashboard - DORA\" link. This will open a new page with the Value Streams Dashboard.\n\n![image of DORA Metrics console](https://about.gitlab.com/images/blogimages/vsdCover.png){: .shadow}\nDORA metrics comparison panel\n{: .note.text-center}\n\nGitLab Value Stream Management allows customers to visualize their end-to-end DevSecOps workstreams, manage their software development processes, and gain insight into how digital transformation and technological investments are delivering value and driving business results. GitLab Value Stream Management is able to do this because GitLab provides an entire DevOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire software development lifecycle. So now your decisions rely on actual data rather than blind estimation or gut feelings. 
Additionally, because GitLab is the place where work happens, GitLab Value Stream Management insights are also actionable, allowing your users to move from \"understanding\" to \"fixing\" at any time, from within their workflow and without losing context.\n\nThe centralized UI in Value Streams Dashboard acts as the single source of truth (SSOT), where all stakeholders can access and view the same set of metrics that are relevant to the organization. The SSOT views ensure consistency, eliminate discrepancies, and provide a reliable and unified source of data for decision-making and analysis.\n\nThe first iteration of the GitLab Value Streams Dashboard was focused on enabling teams to continuously improve software delivery workflows by benchmarking [value stream lifecycle metrics, DORA metrics, and vulnerabilities metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports). One of the key features is a new DevSecOps metrics comparison panel that displays the metrics for a group or project in the month-to-date, last month, the month before, and the past 180 days.\n\nThis comparison enables managers to track team improvements in the context of the other DevSecOps metrics to find patterns or trends over time. 
The data is presented in a clear and concise manner, ensuring that you can quickly grasp the significance of the metrics.\n\n![The Value Streams Dashboard helps you get a high-level custom view over multiple DevOps metrics and understand whether they are improving month-over-month](https://about.gitlab.com/images/blogimages/2023-05-18_vsd_1.gif){: .shadow}\nValue Streams Dashboard metrics comparison panel\n{: .note.text-center}\n\nAdditionally, from each metric you can drill down to a detailed report to investigate the underlying data, understand what is affecting the team performance, and identify actionable insights.\n\nWe understand that every organization has its own set of subgroups and projects, each with specific processes and terminology. That's why we designed our dashboard to be flexible and adaptable. Users have the power to [customize](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#customize-the-dashboard-panels) their dashboard by including panels from different subgroups or projects. \n\nTracking and comparing these metrics over a period of time helps teams catch downward trends early, drill down into individual projects/metrics, take remedial actions to maintain their software delivery performance, and track progress of their innovation investments. Value Streams Dashboard's intuitive interface reduces the learning curve and eliminates the need for extensive training. Everyone can now immediately leverage the platform's unified data store power, maximizing their productivity and saving precious time and resources.\n\n## Value Streams Dashboard roadmap\nWe are just getting started with delivering new capabilities in our Value Streams Dashboard. 
The roadmap includes planned features and functionality that will continue to improve decision-making and operational efficiencies.\n\nSome of the capabilities we plan to focus on next include:\n\n- adding an [executive-level summary](https://gitlab.com/groups/gitlab-org/-/epics/9558) of key metrics related to software performance and flow of value across the organization\n- adding a [\"DORA Performers score\"](https://gitlab.com/groups/gitlab-org/-/epics/10416) panel with the DORA metrics health from all the organization's groups and projects\n- adding [filter by label to the comparison panel](https://gitlab.com/gitlab-org/gitlab/-/issues/388890) - we recognize that every team does not follow the same flow so we are adding them to slice and dice the dashboard views with GitLab labels as filters\n\nTo help us improve the Value Stream Management Dashboard, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n\n## Learn more\n* Find out what's next on the [Value Stream Management direction page](https://about.gitlab.com/direction/plan/value_stream_management/#whats-next-and-why).\n\n* Learn how to use the new dashboard using the [Value Streams Dashboard documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\n* Watch this short video on Value Streams Dashboards:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCheck out part three of this multipart series: \"[GitLab's 3 steps to optimizing software value streams](https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams/)\".\n\n\u003Ci>Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\u003C/i>\n",[1035,834,1074,9,744],{"slug":1665,"featured":6,"template":699},"getting-started-with-value-streams-dashboard","content:en-us:blog:getting-started-with-value-streams-dashboard.yml","Getting Started With Value Streams Dashboard","en-us/blog/getting-started-with-value-streams-dashboard.yml","en-us/blog/getting-started-with-value-streams-dashboard",{"_path":1671,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1672,"content":1678,"config":1685,"_id":1687,"_type":13,"title":1688,"_source":15,"_file":1689,"_stem":1690,"_extension":18},"/en-us/blog/git-for-business-processes",{"title":1673,"description":1674,"ogTitle":1673,"ogDescription":1674,"noIndex":6,"ogImage":1675,"ogUrl":1676,"ogSiteName":685,"ogType":686,"canonicalUrls":1676,"schema":1677},"How we use Git as the blockchain for process changes","Git can be useful for more than just coding and operations. 
It can help you run your entire business – here's how we do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679971/Blog/Hero%20Images/git-blockchain.jpg","https://about.gitlab.com/blog/git-for-business-processes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use Git as the blockchain for process changes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-15\",\n      }",{"title":1673,"description":1674,"authors":1679,"heroImage":1675,"date":1681,"body":1682,"category":811,"tags":1683},[1680],"Aricka Flowers","2019-01-15","\n\nGit may have started out as a way to collaborate on code, but there’s no denying that it has crept into the operations side of things. But does it stop there? We don’t think so.\n\nJust like [blockchain technology](https://blockgeeks.com/guides/what-is-blockchain-technology/) was originally created for cryptocurrency, but is now seen as a revolutionary way to share, store and update [all kinds of data](https://www.fool.com/investing/2018/04/11/20-real-world-uses-for-blockchain-technology.aspx), we see – and use – Git in much the same way.\n\nIn addition to version controlling code and the environment in which it lives, Git can also be used at a high level to facilitate the way a company actually functions, according to our CEO [Sid Sijbrandij](/company/team/#sytses).\n\nHe says GitLab is a prime example of how it can be done.\n\n## How we use Git to run GitLab, the company\n\n\"We’re not just trying to version our code and operations, we're also trying to version all the processes we have at the company, and we do that for a whole slew of reasons,\" says Sid. \"If you write your processes down, it's easier to change and for someone to propose a change. If it's all stored in people's heads, how are you going to change it? 
You'll have to create a presentation and make sure everyone reads it. But if it’s written down, it's faster to make a change and you're better able to communicate the context for it.\"\n\n### How Git has helped us to scale\n\nUsing Git to implement procedural changes within the company has helped GitLab shoulder growing pains, thanks to our [handbook](https://handbook.gitlab.com/handbook/).\n\n\"Although we're not a perfect company by any means, we've been able to scale really rapidly, onboard people and get them started with the work they have to do,\" Sid says. \"And I think our handbook and how we describe things is an important part of that. It's exciting to see it grow. The handbook is now over 2,000 pages, so people can't read everything anymore, but they can read the parts that are relevant to them, and it's really helping with organizational changes that are happening between different departments.\"\n\nSid admits running a business with Git collaboration can seem like a daunting task, especially for companies that did not start out functioning that way. But he urges business leaders to give the process a chance, pointing to a number of companies that are adopting Git as a way to make procedural changes, including O’Reilly Media and several law firms.\n\n## Two tips for adopting Git to run your business\n\n### 1. Evangelize from the top down\n\n\"First of all, this is super hard. It's unnatural and it requires constant campaigning from the top of the company,\" Sid said. \"The natural state is for all the documentation to get out of date, and for people to send each other emails and PowerPoints about the change they want to make without looking at the rest of the changes.\"\n\n### 2. Make processes easier to change\n\n\"What you frequently find in companies is that there's the official process, and then the process that people really use. You can prevent that by making processes easier to change. 
The reality is people are changing processes in a company every single day, and they have to make those changes quickly. So the harder you make it, the more diversions there will be between reality and what's in the handbook. Instead, empower everyone in the organization to make those changes and do so quickly. That is one of the most important things you can do.\"\n\n\"Our handbook is [Creative Commons](https://creativecommons.org/licenses/by-sa/4.0/), so feel free to use that as a starting point for anything that you do.\" [Tweet us](http://twitter.com/gitlab) if you do borrow from or adapt our handbook – we'd love to hear about it.\n\n[Cover image](https://unsplash.com/photos/mf-o1E7omzk) by [chuttersnap](https://unsplash.com/@chuttersnap) on Unsplash\n{: .note}\n",[696,1684,790,721,9],"git",{"slug":1686,"featured":6,"template":699},"git-for-business-processes","content:en-us:blog:git-for-business-processes.yml","Git For Business Processes","en-us/blog/git-for-business-processes.yml","en-us/blog/git-for-business-processes",{"_path":1692,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1693,"content":1698,"config":1704,"_id":1706,"_type":13,"title":1707,"_source":15,"_file":1708,"_stem":1709,"_extension":18},"/en-us/blog/gitlab-and-testify-sec-witness-alliance",{"title":1694,"description":1695,"ogTitle":1694,"ogDescription":1695,"noIndex":6,"ogImage":1086,"ogUrl":1696,"ogSiteName":685,"ogType":686,"canonicalUrls":1696,"schema":1697},"How to enhance supply chain security with GitLab and TestifySec","New alliance partner TestifySec makes Witness available in GitLab","https://about.gitlab.com/blog/gitlab-and-testify-sec-witness-alliance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to enhance supply chain security with GitLab and TestifySec\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nicole Schwartz\"}],\n        \"datePublished\": \"2022-03-16\",\n      
}",{"title":1694,"description":1695,"authors":1699,"heroImage":1086,"date":1701,"body":1702,"category":832,"tags":1703},[1700],"Nicole Schwartz","2022-03-16","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\nToday, GitLab is excited to announce that our partner [TestifySec](https://www.testifysec.com/) has integrated their [Witness](https://github.com/testifysec/witness) open-source tool into GitLab allowing us to take another step along our [Secure Software Supply Chain Direction](https://about.gitlab.com/direction/supply-chain/).\n\n## Secure software supply chain \n\nAn emerging concern in the software development space is being able to secure your software supply chain, an important element of which is documenting the entire supply chain and development progress by creating a chain of custody starting from code creation, build, test, package, and going through deployment. One important element of this chain of custody is commonly referred to as a Software Bill of Materials [SBOM](https://www.ntia.gov/SBOM). There are also frameworks, such as [SLSA](https://slsa.dev/) which collect additional elements about the process. Together these documents are becoming critical components to satisfying regulated industry requirements.\n\nThere are many opportunities as a DevOps Platform to rise to the challenge of creating transparency around software components or artifacts. 
\n\n## TestifySec Witness\n\nRecent compromises and attacks on the software supply chain such as Solarburst and Log4shell highlight the need for a new way of securing CI systems and their artifacts. This is why [TestifySec](https://www.testifysec.com/) created [Witness](https://github.com/testifysec/witness).\n\nCI systems are an incredible source of data.  Many CI systems such as GitLab, along with their cloud infrastructure, provide tokens with non-falsifiable data. Witness verifies and records this data, along with inputs and outputs from a CI process in a verifiable and standardized way.\n\nIn current generation CI systems we restrict the release of artifacts based on pass or failure of build steps. However, most organizations have no standardized way to leverage the metadata available during the CI process in order to inform policy decisions in production environments.\n\nIn next-generation CI systems, data collected during the CI process is not thrown away. Instead, we make this data available to security administrators for use at any policy enforcement point.  With [Witness](https://github.com/testifysec/witness), you shift security left, while communicating risk right.  \n\nOnce an artifact is built it becomes difficult to understand where it was built. Most major cloud providers provide some sort of identity mechanism to verify the instance identity. On AWS this is called the Instance metadata service. The data available in this API is verifiable and is a perfect data structure to make a Witness attestation.\n\nWitness records AWS identity metadata and cryptographically links it to the build artifact and any other events in that CI process.  
\n\nYou can [see the demo](https://gitlab.com/testifysec/demos/witness-demo).\n\nGitLab and TestifySec will be enhancing our features around this as time goes on - keep an eye out for more!\n\nRead more about GitLab's [Secure Software Supply Chain Direction](https://about.gitlab.com/direction/supply-chain/).\n",[787,696,9],{"slug":1705,"featured":6,"template":699},"gitlab-and-testify-sec-witness-alliance","content:en-us:blog:gitlab-and-testify-sec-witness-alliance.yml","Gitlab And Testify Sec Witness Alliance","en-us/blog/gitlab-and-testify-sec-witness-alliance.yml","en-us/blog/gitlab-and-testify-sec-witness-alliance",{"_path":1711,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1712,"content":1718,"config":1723,"_id":1725,"_type":13,"title":1726,"_source":15,"_file":1727,"_stem":1728,"_extension":18},"/en-us/blog/gitlab-auto-devops-in-action",{"title":1713,"description":1714,"ogTitle":1713,"ogDescription":1714,"noIndex":6,"ogImage":1715,"ogUrl":1716,"ogSiteName":685,"ogType":686,"canonicalUrls":1716,"schema":1717},"GitLab Auto DevOps in action","See how the only single application for the entire DevOps lifecycle helps you deliver better software, faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664015/Blog/Hero%20Images/laptop.jpg","https://about.gitlab.com/blog/gitlab-auto-devops-in-action","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Auto DevOps in action\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-08-10\",\n      }",{"title":1713,"description":1714,"authors":1719,"heroImage":1715,"date":1720,"body":1721,"category":832,"tags":1722},[1680],"2018-08-10","\n\nBetter and faster. These two words best describe the production goals of the IT leaders and engineers building today’s cutting-edge software. 
And GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) can help them hit those goals while improving their overall business outcomes.\n\nAs the only single application for the complete [DevOps](/topics/devops/) lifecycle, GitLab Auto DevOps gives development teams all the tools they need to deliver secure, high-quality software at previously unattainable speeds. The secret sauce that makes Auto DevOps so effective is the way it automatically sets up the required integrations and pipeline needed to get your software out of the door faster. With Auto DevOps, your code is automatically tested for quality, scanned for security vulnerabilities and licensing issues, packaged and then set up for monitoring and deployment, leaving engineers with time to place more attention on creating a better product.\n\nThis may all make sense in theory, but as they say, a picture is worth 1,000 words. And it is [rumored](https://idearocketanimation.com/4293-video-worth-1-million-words/?) that video is worth 1.8 million words. With that being said, why not take a look at GitLab Auto DevOps in action? \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4Uo_QP9rSGM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWant to learn more about GitLab Auto DevOps? 
Check out our [documentation](https://docs.gitlab.com/ee/topics/autodevops/), [feature](https://docs.gitlab.com/ee/topics/autodevops/) and [product vision](/direction/) pages.\n\n\nCover photo by [Ash Edmonds](https://unsplash.com/photos/Koxa-GX_5zs) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n",[722,790,789,9,787,743,834],{"slug":1724,"featured":6,"template":699},"gitlab-auto-devops-in-action","content:en-us:blog:gitlab-auto-devops-in-action.yml","Gitlab Auto Devops In Action","en-us/blog/gitlab-auto-devops-in-action.yml","en-us/blog/gitlab-auto-devops-in-action",{"_path":1730,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1731,"content":1737,"config":1743,"_id":1745,"_type":13,"title":1746,"_source":15,"_file":1747,"_stem":1748,"_extension":18},"/en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"title":1732,"description":1733,"ogTitle":1732,"ogDescription":1733,"noIndex":6,"ogImage":1734,"ogUrl":1735,"ogSiteName":685,"ogType":686,"canonicalUrls":1735,"schema":1736},"GitLab Chart works towards Kubernetes 1.22","New minimum version is 1.19 for in-chart NGINX Ingress Controller.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/gitlab-chart-works-towards-kubernetes-1-22","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Chart works towards Kubernetes 1.22\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-17\",\n      }",{"title":1732,"description":1733,"authors":1738,"heroImage":1734,"date":1739,"body":1740,"category":832,"tags":1741},[1010],"2021-12-17","\n\nWe are working to make the GitLab Chart and the GitLab Operator support Kubernetes 1.22, which requires updating the NGINX Ingress Controller used within the Chart and Operator.\n\nThis update requires that we drop support for versions of Kubernetes prior to 
1.19 if using the in-chart NGINX Ingress Controller. Users that still require support for Kubernetes 1.18 and prior releases will only be able to deploy up to Chart version 5.5.x.\n\n## More details on the changes\n\nGitLab uses a [forked version](https://docs.gitlab.com/charts/charts/nginx/fork.html) of the community-supported ingress-nginx Chart to expose the GitLab components via Ingresses. \n\nSupporting Kubernetes 1.22 requires updating the included NGINX Ingress Controller to [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) in order to support the networking.k8s.io/v1 API in Kubernetes 1.22. The previous networking API (networking.k8s.io/v1beta1) has been deprecated since Kubernetes 1.19 and removed in Kubernetes 1.22.\n\nAs a result of the upgrade, we are bound to the breaking change of NGINX Ingress Controller, removing support before Kubernetes 1.19. They provide more clarification in [their FAQ](https://kubernetes.github.io/ingress-nginx/#faq-migration-to-apiversion-networkingk8siov1).\n\nThe forked ingress-nginx Chart is based on [version 4.0.6](https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx/4.0.6) of ingress-nginx/ingress-nginx, which uses [version 1.0.4](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v1.0.4) of the NGINX Ingress Controller.\n\n## Who is impacted\n\nAny deployment which is making use of the NGINX Ingress Controller provided by the GitLab Chart. This covers most, but far from all, users of our Helm Chart and Operator. If you are using an alternate Ingress provider (such as AWS ALB, Azure Application Gateway, or Google GCE Ingress), you will not be affected.\n\n## What to expect\n\nWe recognize that this change may have unintended effects, but most GitLab instances will seamlessly transition to the new NGINX Ingress Controller without incident. 
As always, we recommend a backup be created prior to upgrading the GitLab Chart or GitLab Operator, which will allow your data to be safeguarded should a recovery be necessary, caused by complications in the upgrade.\n\nDepending upon the environment and/or cloud provider, it is possible that when NGINX Ingress Controller is replaced during the upgrade process that the IP addresses associated with the Ingresses may change. This may require that the DNS records for the GitLab instance be updated if a controller such as external-dns is not managing the DNS records. The DNS records related to the following Ingress objects may be affected:\n\n* gitlab.\n* registry.\n* minio. (if used)\n* kas. (if used)\n\nIf the GitLab Pages component is enabled, there may be other DNS records that will need to be updated to connect to the proper Ingress.\n\n## What if there is a problem with the upgrade?\n\nWhile it is not expected that an upgrade will cause a problem, not all environments or configurations can be anticipated. In the event that there is an upgrade problem, please contact GitLab Support if you are a licensed customer. 
If you are running the Community Edition of GitLab, please open an issue in the [GitLab Chart](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/new?issue%5Bmilestone_id%5D=) or [GitLab Operator](https://gitlab.com/gitlab-org/cloud-native/gitlab-operator/-/issues/new?issue%5Bmilestone_id%5D=) projects.\n",[722,9,1742],"kubernetes",{"slug":1744,"featured":6,"template":699},"gitlab-chart-works-towards-kubernetes-1-22","content:en-us:blog:gitlab-chart-works-towards-kubernetes-1-22.yml","Gitlab Chart Works Towards Kubernetes 1 22","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22.yml","en-us/blog/gitlab-chart-works-towards-kubernetes-1-22",{"_path":1750,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1751,"content":1757,"config":1762,"_id":1764,"_type":13,"title":1765,"_source":15,"_file":1766,"_stem":1767,"_extension":18},"/en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"title":1752,"description":1753,"ogTitle":1752,"ogDescription":1753,"noIndex":6,"ogImage":1754,"ogUrl":1755,"ogSiteName":685,"ogType":686,"canonicalUrls":1755,"schema":1756},"GitLab CI/CD is for multi-cloud","Can cloud providers (and their tools) ever be cloud agnostic? We discuss GitHub Actions and GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678401/Blog/Hero%20Images/gitlab-for-multicloud.jpg","https://about.gitlab.com/blog/gitlab-ci-cd-is-for-multi-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD is for multi-cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-11-06\",\n      }",{"title":1752,"description":1753,"authors":1758,"heroImage":1754,"date":1759,"body":1760,"category":718,"tags":1761},[1113],"2019-11-06","\nAs organizations continue to go all-in on cloud-first strategies, optimizing their cloud architectures is becoming a top priority. 
It’s estimated that investments in infrastructure to support cloud computing account for [more than a third of all IT spending](https://www.zdnet.com/article/top-cloud-providers-2019-aws-microsoft-azure-google-cloud-ibm-makes-hybrid-move-salesforce-dominates-saas/). Using multiple cloud providers with multiple cloud services requires an architecture that enables workflow portability, and organizations will need an unbiased, multi-cloud strategy to make that a reality.\n\n## What is multi-cloud?\n\nMulti-cloud describes [how enterprises use multiple cloud providers to meet different technical or business requirements](https://www.zdnet.com/article/multicloud-everything-you-need-to-know-about-the-biggest-trend-in-cloud-computing/). At its core, multi-cloud is made possible through cloud-native applications built from containers using services from different cloud providers. It allows for multiple services to be managed in one architecture. [85% of enterprises currently operate in multiple clouds](https://www.ibm.com/blogs/cloud-computing/2018/10/19/survey-multicloud-management-tools/), but just because an organization uses multiple cloud providers doesn’t necessarily mean they are multi-cloud.\n\nBeing dependent on one cloud provider can limit the flexibility of an organization and leave it susceptible to vendor lock-in. Workflow portability is one of the benefits of multi-cloud and it enables a seamless workflow, regardless of _where_ you deploy.\n\nIn addition to workflow portability, there are several reasons why most businesses have adopted multi-cloud, and why more will continue to use this approach:\n\n*   **Greater flexibility**: Each cloud vendor shines in some areas and is weak in others. 
Using multiple vendors lets you use the right tool for the job.\n*   **Better acquisitions**: Whether an organization wants to grow through acquisitions (or be acquired itself), existing systems can work within another company’s infrastructure, even if both are using separate cloud providers.\n*   **Increased resilience**: Architecting failover between multiple cloud providers lets you stay up even if one of your vendors is down.\n*   **Improved cloud negotiations**: If another cloud vendor offers better terms or significant credits, businesses can have better leverage because their [DevOps processes](/topics/devops/) are not tied to vendor-specific services.\n*   **Fewer conflicts of interest**: With cloud service providers offering so many different services, you’re less likely to find yourself [in conflict with customers competing in those same spaces](https://www.cnbc.com/2017/06/21/wal-mart-is-reportedly-telling-its-tech-vendors-to-leave-amazons-cloud.html).\n\nA multi-cloud strategy allows organizations to use the tools and services that work best for the job, not just tools that work within their cloud environment.\n\n## Can cloud providers really support multi-cloud?\n\nCloud service providers continually compete with each other to provide more services to keep customers in their cloud. The more services you have with one CSP, the less likely you are to migrate those workloads. AWS offers 90 different services, as does GCP. In comparison, [Microsoft lists over 160 services on its Azure product page](https://www.parkmycloud.com/cloud-services-comparison/) and many of them are integrations with other Microsoft products. Cloud service providers want to have more of your business by making you more dependent on their specific services.\n\nEven though most cloud providers claim to support multi-cloud, migrating workloads out of their cloud isn’t in their best interest. 
As cloud computing is a pay-per-use model, it seems unlikely that multi-cloud would be a goal for the large cloud providers.\n\n## Implementing CI/CD in the cloud\n\nIn the [RightScale 2019 State of the Cloud Report](https://info.flexera.com/CM-REPORT-State-of-the-Cloud), 33% of respondents mentioned [implementing CI/CD](/topics/ci-cd/) in the cloud as a top cloud initiative. DevOps processes play a big role in multi-cloud deployments, so if organizations are wanting to build faster and deploy anywhere, CI/CD will be a key factor in that success. Multi-cloud is all about being cloud-agnostic, and your tools should also support that goal.\n\nBut what if your CI/CD comes from a cloud provider?\n\n### GitHub Actions and GitLab CI/CD\n\nIn 2018, [GitHub announced Actions](/blog/github-launch-continuous-integration/) with CI-like functionality built into a single application offering. The industry has shown us in the past year that single application functionality [is becoming a trend](/blog/built-in-ci-cd-version-control-secret/), and GitLab has been a part of that single application message since the beginning. Now that continuous integration has caught up with the importance of single application, we have to examine how both GitHub and GitLab fit into multi-cloud deployments.\n\nIn June 2018 [Microsoft acquired GitHub](/blog/microsoft-acquires-github/), which really affirmed the importance of software developers and modern DevOps. Developer tools have a high capacity for driving cloud usage because once you have your application code hosted, the natural next step is finding a place to deploy it. 
From a strategic standpoint, this acquisition made a lot of sense for Microsoft because they could use [GitHub’s popularity as a source code management tool as a springboard for greater Azure adoption](https://www.techrepublic.com/article/with-github-acquisition-microsoft-wants-to-make-azure-the-default-cloud-for-developers/).\n\nWhen we talk about multi-cloud in the CI/CD conversation, cloud-agnosticism kind of goes out the window when it comes to GitHub Actions. GitHub’s ubiquity in the SCM market means that millions of developers are using that platform, and it’s those users that [made GitHub such an appealing asset for Microsoft](/blog/microsoft-acquires-github/).\n\nGitLab, in comparison, is cloud-independent. When organizations use GitLab CI/CD, there is no conflict of interest in using one cloud provider over another. Being truly cloud-agnostic means that GitLab provides a complete [DevOps platform](/solutions/devops-platform/) that allows teams to have the same productivity metrics, the same governance, regardless of what cloud you use.\n\n“Choosing a cloud provider should depend on the company’s business objectives, it should not be constrained by technology, and GitLab wants to enable every one of our customers to have this freedom,” says [Sid Silbrandij](/company/team/#sytses), co-founder and CEO at GitLab.\n\n## Multi-cloud should mean any cloud\n\nBusinesses want to choose cloud providers for their inherent value and use the services that best meet their needs. In turn, we should expect our DevOps processes to support multi-cloud objectives. 
Partnering with cloud-agnostic vendors provides a consistent workflow across all clouds, and CI/CD will play a big role in the multi-cloud future.\n\nWe’d love for you to watch our webcast _Mastering your CI/CD_ so you can see for yourself how GitLab’s industry-leading CI/CD helps teams build, test, deploy, and monitor code on any cloud.\n\n[Watch the webcast](/competition/github/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Alexandre Chambon](https://unsplash.com/@goodspleen?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[108,9,722],{"slug":1763,"featured":6,"template":699},"gitlab-ci-cd-is-for-multi-cloud","content:en-us:blog:gitlab-ci-cd-is-for-multi-cloud.yml","Gitlab Ci Cd Is For Multi Cloud","en-us/blog/gitlab-ci-cd-is-for-multi-cloud.yml","en-us/blog/gitlab-ci-cd-is-for-multi-cloud",{"_path":1769,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1770,"content":1776,"config":1782,"_id":1784,"_type":13,"title":1785,"_source":15,"_file":1786,"_stem":1787,"_extension":18},"/en-us/blog/gitlab-composer-packages-migration-path",{"title":1771,"description":1772,"ogTitle":1771,"ogDescription":1772,"noIndex":6,"ogImage":1773,"ogUrl":1774,"ogSiteName":685,"ogType":686,"canonicalUrls":1774,"schema":1775},"Migrate composer packages to GitLab","GitLab Packages now ships with a composer registry","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681586/Blog/Hero%20Images/gitlab-composer-package-migration.jpg","https://about.gitlab.com/blog/gitlab-composer-packages-migration-path","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrate composer packages to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jochen Roth\"}],\n        \"datePublished\": \"2020-09-22\",\n      
}",{"title":1771,"description":1772,"authors":1777,"heroImage":1773,"date":1779,"body":1780,"category":1239,"tags":1781},[1778],"Jochen Roth","2020-09-22","{::options parse_block_html=\"true\" /}\n\n\n\n\n## Manage Composer Packages in Gitlab\n\n\nWith Gitlab 13.3, a repository for composer packages was introduced. This\nallows you to store and manage your private and public composer packages in\nyour Gitlab instance.\n\n\n### Create Packages\n\n\nCreating packages is only a matter of adding the composer template to your\n`.gitlab-ci.yml`. Gitlab will automatically create a package when a git tag\nwas created or a commit was pushed to a branch.\n\n\n[\u003Cimg src=\"/images/blogimages/select-template.png\" width=\"500\"\nheight=\"auto\">](/images/blogimages/select-template.png)\n\n\n### Migrate your Packages to Gitlab\n\n\nYou might wonder how to create packages for your existing tags of each\nrepository.\n\n\nYou have 2 options:\n\n\n1. Use curl to create packages manually e.g. ```curl --data tag=1.0.0\n'https://__token__:\u003Cpersonal-access-token>@gitlab.com/api/v4/projects/\u003Cproject_id>/packages/composer'```\n\n2. Use [this\npackage](https://gitlab.com/ochorocho/gitlab-create-package-versions), which\nwill create all packages and their versions for you using the Gitlab API.\n\n\n## Conclusion\n\n\nSo far it is working pretty well. Publish and install packages works\nflawlessly. Managing permissions for a package is a breeze.\n\n\nCurrently there is only a group endpoint. I could imagine others may require\nan instance endpoint to be able to access all packages of a Gitlab instance\nusing a single endpoint/repository.\n\nFor now you have to add multiple endpoints/repositories to your\ncomposer.json for each group.\n\n\nIn my company, one group contains all shared projects and we were able\ncircumvent adding multiple endpoints/repositories.\n\n\nThere is always room for improvement. 
For example, the GUI should show more\n[details about the size of packages and how it was published (manually or\nvia CI)](https://gitlab.com/gitlab-org/gitlab/-/issues/254385) and [semantic\nversioning is not fully\nsupported](https://gitlab.com/gitlab-org/gitlab/-/issues/240887).\n\nIf you are interested in GitLab or Composer, both issues are great ways to\ncontribute, so that we can continue to improve this product together.\n\n\n## Resources\n\n\n* [GitLab Packages\nDocs](https://docs.gitlab.com/ee/user/packages/composer_repository/)\n\n* [Composer Docs](https://getcomposer.org/doc/)\n\n* [Migrate to Gitlab\nPackages](https://gitlab.com/ochorocho/gitlab-create-package-versions)\n",[721,9,232],{"slug":1783,"featured":6,"template":699},"gitlab-composer-packages-migration-path","content:en-us:blog:gitlab-composer-packages-migration-path.yml","Gitlab Composer Packages Migration Path","en-us/blog/gitlab-composer-packages-migration-path.yml","en-us/blog/gitlab-composer-packages-migration-path",{"_path":1789,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1790,"content":1796,"config":1802,"_id":1804,"_type":13,"title":1805,"_source":15,"_file":1806,"_stem":1807,"_extension":18},"/en-us/blog/gitlab-daily-tools",{"title":1791,"description":1792,"ogTitle":1791,"ogDescription":1792,"noIndex":6,"ogImage":1793,"ogUrl":1794,"ogSiteName":685,"ogType":686,"canonicalUrls":1794,"schema":1795},"How to improve your daily GitLab experience","Personal tools and tips for a more productive GitLab experience","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/gitlab-daily-tools","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to improve your daily GitLab experience\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2019-11-26\",\n      
}",{"title":1791,"description":1792,"authors":1797,"heroImage":1793,"date":1799,"body":1800,"category":1239,"tags":1801},[1798],"Viktor Nagy","2019-11-26","{::options parse_block_html=\"true\" /}\n\n\n\n\n\u003C!-- Content start here -->\n\n\nThis is a collection of tools and settings I use to create a more productive\nGitLab experience. \n\n\n*Disclaimer: all screenshots are using Firefox's Hungarian language\nsetting.*\n\n\n## Easy navigation\n\n\nI use Firefox, but it should work in Chrome too. Basically, after\nbookmarking a website, you can add a `keyword` to it. This allows for quick\nnavigation.\n\n\n![Firefox bookmarks with\nkeywords](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-bookmarks.png){:\n.shadow.medium}\n\n\nMy keyworded navigation includes the following pages:\n\n\n- `gl-epics` -->\n[https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=)\n\n- `gl-issues` -->\n[https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=)\n\n- `gl-product` -->\n[https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=](https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=)\n\n- `gl-new` -->\n[https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=](https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=)\n\n\n## Easy search (complex way)\n\n\nThe quick-links above are nice, but you browser can do even more!\n\nYou can actually use the above keywords to pass a search query while you\nnavigate to the given 
page.\n\n\n![Search with\nkeywords](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-search.png){:\n.shadow.medium}\n\n\nBy writing `gl-new This is a new issue` a \"new issue\" page will open and\nprefill `This is a new issue` as the title. You can use this pre-fill\nmechnism to filter the issues, epics lists or a board too.\n\n\nHow can you achieve this? The argument we are passing for `gl-new` can be\nreferenced as `%s` in the final url.\n\nThis means that my actual bookmarked urls are the following:\n\n\n- `gl-epics` -->\n[https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/epics?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s)\n\n- `gl-issues` -->\n[https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=group%3A%3Asystem&search=%s)\n\n- `gl-product` -->\n[https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=%s](https://gitlab.com/groups/gitlab-org/-/boards/1342179?label_name[]=group%3A%3Asystem&search=%s)\n\n- `gl-new` -->\n[https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=%s](https://gitlab.com/gitlab-org/gitlab/issues/new?issuable_template=Problem_Validation&issue[title]=%s)\n\n\n*Note:* unfortunately, only simple strings can be searched this way. Adding\nextra labels does not work.\n\n\n## Easy search (simple way)\n\n\nThe above is one way to search different sites easily. 
You can achieve\nsomething similar (without bookmarks) by adding\n\na new search engine for your browser.\n\n\n![Add a search\nengine](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/firefox-search-engine.png){:\n.shadow.medium}\n\n\nI have such search engines added for the GitLab documentation and the GitLab\nhandbook. You can easily add a new search\n\nby right clicking the search bar on the GitLab docs site, and selecting the\n`Add a keyword for this search` menu option.\n\nUnfortunately, the above does not work for the handbook.\n\n\nTo add handbook search on Firefox, one can use the [Add custom search\nengine](https://addons.mozilla.org/hu/firefox/addon/add-custom-search-engine/)\nadd-on (you can remove it after adding the engine). On Chrome, you can just\nadd the engine under \n\nyour settings. To search the handbook, I use Google's site search\nfunctionality, and my search engine contains the following url:\n[https://www.google.com/search?q=site%3Ahttps%3A%2F%2Fabout.gitlab.com%2Fhandbook+%s](https://www.google.com/search?q=site%3Ahttps%3A%2F%2Fabout.gitlab.com%2Fhandbook+%s)\n\n\n## Quick actions made _really_ quick\n\n\nI often find myself repeating the same actions, such as adding the same\nlabels to multiple issues or assigning issues to myself. When I want to\napply a label, I have to manually type most of the label and autocompleting\n`~\"workflow::product validation\"` does not help much unfortunately. So I\ncame up with a different solution.\n\n\nThere is a handy browser plugin that allows you to script around any\nwebpage. It's called [TamperMonkey](https://www.tampermonkey.net/). I have\ncreated some *VeryQuickActions* using this plugin. 
\n\nDo you have a similar `Quick action` line in your GitLab input areas?\n\n\n![Quick Actions\naddon](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-quickactions.png){:\n.shadow.medium}\n\n\nYou can get those nice links at the bottom of the filed by [adding its\nscript](https://gitlab.com/gitlab-com/www-gitlab-com/-/snippets/1999778) to\nTamperMonkey.\n\n\nCustomizing these quick actions is quite easy and does not require advance\nprogramming skills.\n\nknowledge. You can open the above script (TamperMonkey) has a built-in\neditor for this.\n\n\nIf you would like to change the content of these quick links, you can use\n`TamperMonkey`'s built-in editor. Just look for the following lines:\n\n\n```js\n    const actions = [\n        ['/assign me', 'Mine'],\n        ['/label ~\"group::system\"', 'System label'],\n        ['/label ~\"workflow::problem validation\"', 'Problem label'],\n        ['/label ~\"workflow::solution validation\"', 'Solution label'],\n        ['/label ~\"workflow::validation backlog\"', 'Backlog label'],\n    ]\n```\n\n\nThese lines define the links that will be created. The first item in the\nlists show what will be included in the description or comment text on\nGitLab. The second item defines the text on the link.\n\nYou can use these as a guideline to create your own.\n\n\n*Note:* there is still a missing feature I would like to add to this script:\nI would like to make it easy to assign an issue\n\nto the previously viewed epic.\n\n\n## Filtering to-do's and checkboxes\n\n\nWhile I was on-boarding as a new GitLab team member, I ran a few scripts in\nthe developer console to hide already checked checkboxes in a list and to\ndim the lines that did not contain my name. Since then, I have found myself\nneeding similar functionality from time to time.\n\n\n![Filter\ncheckboxes](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-filter.png){:\n.shadow.medium}\n\n\nOn the above image. 
Which checkboxes are relevant to me?\n\n\nAgain, [the solution is a TamperMonkey\nscript](https://gitlab.com/gitlab-com/www-gitlab-com/-/snippets/1999779).\nThis script adds a small filter button\n\nbeside the GitLab search box. Filtering issues leaves (or excludes) only\nthose checkboxes on the page that contain your search term.\n\nIf you want to exclude the search term, start your filter with an\nexclamation mark `!`.\n\n\n![Filter\ncheckboxes](https://about.gitlab.com/images/blogimages/gitlab-daily-tools/tm-filter2.png){:\n.shadow.medium}\n\n\n## What are your tips and tricks\n\n\nWe would love to hear your tips and tricks for using GitLab. Feel free to\nleave them in a comment below.\n",[790,9],{"slug":1803,"featured":6,"template":699},"gitlab-daily-tools","content:en-us:blog:gitlab-daily-tools.yml","Gitlab Daily Tools","en-us/blog/gitlab-daily-tools.yml","en-us/blog/gitlab-daily-tools",{"_path":1809,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1810,"content":1816,"config":1822,"_id":1824,"_type":13,"title":1825,"_source":15,"_file":1826,"_stem":1827,"_extension":18},"/en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"title":1811,"description":1812,"ogTitle":1811,"ogDescription":1812,"noIndex":6,"ogImage":1813,"ogUrl":1814,"ogSiteName":685,"ogType":686,"canonicalUrls":1814,"schema":1815},"GitLab Duo Workflow: Enterprise visibility and control for agentic AI","Secure, autonomous, context-aware AI agents take on complex tasks, freeing developers to ship innovative software faster. 
Private beta waitlist now open.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660174/Blog/Hero%20Images/Workflow_1800x945.png","https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Workflow: Enterprise visibility and control for agentic AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Pini Wietchner\"}],\n        \"datePublished\": \"2025-02-24\",\n      }",{"title":1811,"description":1812,"authors":1817,"heroImage":1813,"date":1819,"body":1820,"category":764,"tags":1821},[1818],"Pini Wietchner","2025-02-24","Today, we're excited to announce the opening of the waitlist for the [private beta of GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/agent-platform/): **agentic AI built on top of the most comprehensive DevSecOps platform.** The next step in our AI roadmap, GitLab Duo Workflow will help development teams navigate everything from project bootstrapping to deployment processes, from debugging issues to cross-team coordination, all within the IDE.\n\nGitLab Duo Workflow leverages the GitLab platform's structure for collaboration, continuous integration, continuous deployment, security, and compliance to help organizations as they accelerate their development process with AI agents.\n\nUse GitLab Duo Workflow to help you:\n* [bootstrap a new development project](#from-slow-project-setup-to-a-running-start)\n* [modernize code](#from-legacy-code-to-modern-applications)\n* [perform contextual tasks](#from-context-switching-to-flow-state)\n* [create documentation](#from-stale-docs-to-dynamic-knowledge)\n* [enhance test coverage](#from-patchy-to-comprehensive-testing)\n* and more\n\nThis is just the beginning. 
With GitLab’s unified data store, the more you use GitLab, the more context GitLab Duo Workflow has about your code, configurations, security findings, and deployment practices. The result: an increasingly powerful development experience that's tailored to your organization.\n\n## The promise and challenge of AI agents\n\nSoftware has fundamentally changed the world, but only a tiny fraction of the world's population has the skills to build software today. Yet, these developers reach billions of people with smartphones and internet connections. Just imagine a world where *more* people can build, secure, and deliver production-ready software – there will be an explosion of innovation as more people can create software that impacts billions. **Agentic AI will make that happen.**\n\nAI agents understand context, maintain knowledge of entire codebases, and actively collaborate on complex software projects across development, security, and operations. With AI agents, developers can create software at a scale previously unimaginable for individuals or even teams.\n\nBut this shift raises important questions about visibility, control, and how AI will impact developers' work. Organizations need to ensure AI enhances their developers' capabilities while enabling them to maintain oversight of their development process. The key to success isn't just adopting AI – it's adopting it in a way that empowers developers while preserving security, compliance, and governance.\n\n## AI's success depends on your platform, not more add-on tools\n\nWhen you're working with more developers, code, and potential security risks, adding separate tools for each new challenge only creates more complexity. 
Our most recent [DevSecOps Survey](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) shows just how serious this problem is: DevSecOps teams are juggling up to 14 different tools, with professionals spending up to 80% of their time on non-coding tasks. For AI to be truly effective, it also needs high-quality, unified data. That's hard to achieve with disparate tools.\n\n**The GitLab DevSecOps platform combined with GitLab AI agents** brings everything together in a single data model that encapsulates source code, merge requests, epics, users, access rights, and more. The agents we're building use context about users and projects to standardize how teams work and automate the non-coding tasks that absorb developer time, such as scanning for security issues and enforcing compliance rules. When AI is built directly into the platform, these capabilities become even more powerful, turning AI agents into development partners while keeping you in control of how AI enhances the process.\n\n**This isn't a far-off future — it's what we're building right now with GitLab Duo Workflow.**\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1059060959?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"GitLab Duo Workflow, the future of secure agentic AI software development\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>`\n\n## GitLab Duo Workflow: AI agents on the most comprehensive DevSecOps platform\n\nLeveraging GitLab's end-to-end DevSecOps platform, GitLab Duo Workflow helps developers work at their highest potential. 
While AI coding assistants help with individual pieces of code, GitLab Duo Workflow will understand your entire development lifecycle – automating routine tasks so developers can focus on strategic innovation and creative problem-solving. As we develop GitLab Duo Workflow, here’s what it will be able to help teams achieve:\n\n### From slow project setup to a running start\n\nDevelopers spend precious time configuring new projects, managing dependencies, and setting up basic infrastructure instead of building new features. With GitLab Duo Workflow, you can **automate project bootstrapping directly in the IDE**, providing the right configurations from the start so you can focus on innovation sooner.\n\n### From legacy code to modern applications\n\nModernizing legacy code is more than just updating syntax — it requires understanding dependencies, tests, CI/CD pipelines, and documentation. GitLab Duo Workflow helps **modernize your codebase by handling code refactoring** – from code to tests.\n\n### From context switching to flow state\n\nToday, developers constantly switch between tools, docs, and codebases to solve problems. GitLab Duo Workflow will help **resolve tasks with the full context of your codebase-related issues and merge requests**, letting developers stay in their flow.\n\n### From stale docs to dynamic knowledge\n\nDocumentation becomes stale quickly, making codebases harder to understand and maintain. GitLab Duo Workflow **supports developers in generating and updating documentation**, including README files, code flow diagrams, and architecture documentation.\n\n### From patchy to comprehensive testing\n\nAs codebases grow, maintaining comprehensive test coverage becomes increasingly challenging. 
GitLab Duo Workflow **can generate tests for entire sections of your codebase** while integrating with your existing test infrastructure, ensuring more reliable software with less effort.\n\n## Sign up for the private beta waitlist\n\n[Sign up for the GitLab Duo Workflow private beta waitlist](https://about.gitlab.com/gitlab-duo/agent-platform/) to see the next step in our vision for secure agentic AI – from project setup to deployment. Built on GitLab's DevSecOps platform, these agents understand your entire software lifecycle while maintaining the enterprise-grade security and control organizations require.\n\n*Disclaimer: This page contains information about upcoming products, features, and functionality. This information is for informational purposes only and should not be relied upon for purchasing or planning. All items are subject to change or delay, and the development, release, and timing remain at GitLab Inc.'s sole discretion.*",[495,766,834,767,1014,9],{"slug":1823,"featured":90,"template":699},"gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai","content:en-us:blog:gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","Gitlab Duo Workflow Enterprise Visibility And Control For Agentic Ai","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai.yml","en-us/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai",{"_path":1829,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1830,"content":1836,"config":1843,"_id":1845,"_type":13,"title":1846,"_source":15,"_file":1847,"_stem":1848,"_extension":18},"/en-us/blog/gitlab-extends-omnibus-package-signing-key-expiration",{"title":1831,"description":1832,"ogTitle":1831,"ogDescription":1832,"noIndex":6,"ogImage":1833,"ogUrl":1834,"ogSiteName":685,"ogType":686,"canonicalUrls":1834,"schema":1835},"GitLab extends Omnibus package signing key expiration to 2024","Our GPG key will now expire on July 1, 2024. 
Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669863/Blog/Hero%20Images/security-pipelines.jpg","https://about.gitlab.com/blog/gitlab-extends-omnibus-package-signing-key-expiration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab extends Omnibus package signing key expiration to 2024\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"João Alexandre Prado Tavares Cunha\"}],\n        \"datePublished\": \"2023-06-14\",\n      }",{"title":1831,"description":1832,"authors":1837,"heroImage":1833,"date":1839,"body":1840,"category":1014,"tags":1841},[1838],"João Alexandre Prado Tavares Cunha","2023-06-14","\n\nGitLab uses a GNU Privacy Guard (GPG) key to sign all Omnibus packages created within the CI pipelines to ensure that the packages have not been tampered with. This key is separate from the repository metadata signing key used by package managers and the GPG signing key for the GitLab Runner. The Omnibus package signing key, which is set to expire on July 1, 2023, will be extended to expire on July 1, 2024.\n\n## Why are we extending the deadline?\nThe Omnibus package signing key's expiration is extended each year to comply with GitLab security policies and to limit the exposure should the key become compromised. The key's expiration is extended instead of rotating to a new key to be less disruptive for users who do verify package integrity checks prior to installing the package.\n\n## What do I need to do?\nThe only action that needs to be taken is to update your copy of the package signing key _if_ you validate the signatures on the Omnibus packages that GitLab distributes.\n\nThe package signing key is not the key that signs the repository metadata used by the OS package managers like `apt` or `yum`. 
Unless you are specifically verifying the package signatures or have configured your package manager to verify the package signatures, there is no action needed on your part to continue installing Omnibus packages.\n\nMore information concerning [verification of the package signatures](https://docs.gitlab.com/omnibus/update/package_signatures#package-signatures)\nis available in the Omnibus documentation. If you just need to refresh a copy\nof the public key, then you can find it on any of the GPG keyservers by\nsearching for support@gitlab.com or using the key ID of\n`DBEF 8977 4DDB 9EB3 7D9F  C3A0 3CFC F9BA F27E AB47.` Alternatively you could\ndownload it directly from packages.gitlab.com using the URL:\n\n    https://packages.gitlab.com/gitlab/gitlab-ce/gpgkey/gitlab-gitlab-ce-3D645A26AB9FBD22.pub.gpg\n\n## What do I do if I still have problems?\nPlease open an issue in the [omnibus-gitlab issue tracker](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/new?issue&issuable_template=Bug).\n",[787,1842,9,1014],"security releases",{"slug":1844,"featured":6,"template":699},"gitlab-extends-omnibus-package-signing-key-expiration","content:en-us:blog:gitlab-extends-omnibus-package-signing-key-expiration.yml","Gitlab Extends Omnibus Package Signing Key Expiration","en-us/blog/gitlab-extends-omnibus-package-signing-key-expiration.yml","en-us/blog/gitlab-extends-omnibus-package-signing-key-expiration",{"_path":1850,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1851,"content":1857,"config":1864,"_id":1866,"_type":13,"title":1867,"_source":15,"_file":1868,"_stem":1869,"_extension":18},"/en-us/blog/gitlab-for-agile-portfolio-planning-project-management",{"title":1852,"description":1853,"ogTitle":1852,"ogDescription":1853,"noIndex":6,"ogImage":1854,"ogUrl":1855,"ogSiteName":685,"ogType":686,"canonicalUrls":1855,"schema":1856},"How to use GitLab for Agile portfolio planning and project management","GitLab provides features that are flexible enough to be used for 
scaled Agile portfolio planning and project management, regardless of the framework you choose.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669575/Blog/Hero%20Images/agilemultipleteams.jpg","https://about.gitlab.com/blog/gitlab-for-agile-portfolio-planning-project-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile portfolio planning and project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Hernandez\"},{\"@type\":\"Person\",\"name\":\"Julie Byrne\"}],\n        \"datePublished\": \"2020-11-11\",\n      }",{"title":1852,"description":1853,"authors":1858,"heroImage":1854,"date":1861,"body":1862,"category":832,"tags":1863},[1859,1860],"Victor Hernandez","Julie Byrne","2020-11-11","\nMany organizations using GitLab want to understand how to best apply the various features to support [Agile project and portfolio management](/solutions/agile-delivery/) processes (PPM) at scale. These organizations use different Agile frameworks. In a previous blog post, we outlined [an approach for using GitLab for Agile software development](/blog/gitlab-for-agile-software-development/). Since the original post, we've continued to enhance functionality for lean/Agile portfolio planning and Agile project management. In this blog post, we’re updating recommendations for using Agile based on these enhancements and we describe how these features can be utilized for a variety of different scaling frameworks.\n\n## Agile software development at scale\n\nFirst, let’s take a look at a typical scaling model of [Agile software development](/topics/agile-delivery/) beyond the individual team level. 
Whether you’ve adopted a specific scaling framework such as the [Scaled Agile Framework (SAFe)](https://www.scaledagileframework.com/), [Disciplined Agile (DA)](https://www.pmi.org/disciplined-agile), [Large Scale Scrum (LeSS)](https://less.works/), or [Spotify](https://medium.com/scaled-agile-framework/exploring-key-elements-of-spotifys-agile-scaling-model-471d2a23d7ea), most scaling models have similarities in their approach, organizing Agile teams into teams of teams, and even into teams of teams of teams.\n\n![](https://about.gitlab.com/images/blogimages/team-teams2.png){: .medium.center}\n\nTypically, scaling frameworks use these types of labels to describe each level:\n\n| **Level** | **Common Names** | **Description** |\n| ----- | ----- | ----- |\n| Team | Scrum team, Kanban team, Squad | A cross functional group (including BA, Dev, Test, and other supporting roles) implementing stories and bug fixes for an application or set of applications|\n| Team of Teams | Program, Release Train, Tribe | A set of teams who plan together and coordinate efforts to implement features for a system involving one or more applications |\n| Team of Teams of Teams | Portfolio, Business Unit, Alliance | One or more programs with a shared set of strategic goals and themes, typically funded with a single budget |\n\nNow that we've reviewed the different levels of Agile at scale, let’s next think about what types of data and visibility are required for agility at each level.\n\nThe scrum master/project manager/tribe lead, product owner, and team members are part of the Team level that is focused on short-term planning, typically weekly to monthly. 
They will want:\n\n- A board view to show flow of work\n- Current and upcoming iteration plan\n- A task list for each work item\n- Visibility into team progress\n- Team predictability\n\nThe program manager/release train engineer, product manager/product area lead, and design lead guide the Team of Teams, with a focus is on mid-range planning, monthly to quarterly (or potentially a bit longer). They will want visibility into:\n\n- A prioritized feature list with anticipated business value captured\n- Feature roadmap\n- View of mid-range plan\n- Epic health\n- Progress against plan\n- Program predictability\n\nFinally, portfolio managers, business leaders, and chief architects perform strategic long-term planning, typically quarterly to annually or longer, at the Team of Teams of Teams level. They will want to see:\n\n- A list of long-term epics/initiatives/business projects, categorized by theme and/or strategic goals\n- The long-term strategic roadmap\n\n## How can we best support these needs using GitLab?\n\nFirst, we need to understand what GitLab object types to use for support the appropriate visibility at each level.\n\n| **GitLab Structure** | **Team** | **Team of Teams** | **Team of Teams of Teams** |\n| ----- | ----- | ----- | -----  |\n| Org structure | Project or sub-sub-group | Sub-group | Top level group |\n| Work items | Issue | Child epic | Parent epic |\n| Time boxes | Iteration | Milestone | Roadmap across milestones |\n\nIn GitLab, epics can be defined in a hierarchy to break down long-term epics into a set of shorter-term epics that can each be delivered by a single Team-of-Teams. While we will use a single parent-child epic hierarchy in this blog to keep things simple, you can use more levels of nesting. The lowest level of epic in the hierarchy would be linked to a set of issues to define the work each team will do in order to implement that epic. GitLab is very flexible and does not enforce a hierarchy. 
For example, when there are cases when an epic should be tracked at the portfolio level but be decomposed directly into issues, with no features in between, GitLab will allow you to do that linking directly without having to create dummy features in the middle.\n\n![](https://about.gitlab.com/images/blogimages/epic_hierarchy2.png){: .medium.center}\n\nWe recommend using [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) to define epic types, e.g., you might define long-term epics to be portfolio epics, and decompose them into shorter-term features. Using _epic::portfolio-epic_ and _epic::feature_ will allow you to appropriately categorize and filter a list of epics and make sure that each epic exists in the appropriate location.\n\nA [group](https://docs.gitlab.com/ee/user/group/) can be used to organize projects. And groups can be nested, e.g., a parent group can contain multiple child groups, and each child group can have its own subgroups, etc. A GitLab [project](https://docs.gitlab.com/ee/user/project/) contains a single source code repository, issue tracker, and associated tools and functionality in order to collaborate on software development for that repository.\n\n![](https://about.gitlab.com/images/blogimages/group_project2.png){: .medium.center}\n\nNote: Group permissions are propagated down the tree from the top-level, so, e.g., a maintainer in the top-level group will have maintainer permissions in the entire group hierarchy.\n\nWe recommend that you use a nested group hierarchy to define your scaled organizational structure for Team of Teams of Teams, Team of Teams, and Teams. For example, consider an electronic banking program that is part of the digital services portfolio for a financial services provider. The electronic banking program might have separate teams that work on web, mobile, backend, and middleware. 
You would use a parent group for the digital services portfolio, a sub-group for the electronic banking program, and a separate project within the sub-group for each team.\n\n![](https://about.gitlab.com/images/blogimages/group_project_example.png){: .medium.center}\n\nGenerally speaking, parent epics would be defined within the top-level group since they define work that can span the sub-groups. Each parent epic would be broken down into multiple child epics, each of which is defined within the appropriate child group (representing a Team-of-Teams).\n\nThe example above is simple in that each Agile team is working on a single repository. But what if that’s not the case?\n\n- If a single team works exclusively on multiple repositories (but no other team works on the them), then create a sub-group for the team, and include each repo as a project.\n- If multiple teams work on a collection of repositories, use the Team of Teams group for collaboration across all Teams in all projects, and use individual scoped labels for each team to track their issues on filtered boards.\n\nGitLab provides an [issue tracker](https://docs.gitlab.com/ee/user/project/issues/) for any types of issues you want to manage and track. Typically, for Agile software development teams, these would be things like user stories and defects. We recommend that you use [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) to define the different issue types, for ease of filtering and reporting. The great news is that you can have as many or as few issue types as you see fit. GitLab does not provide the ability to define a custom schema for each issue type as that tends to complicate both administration and usage of issues and gets in the way of software development. 
Instead, use [custom issue templates](https://docs.gitlab.com/ee/user/project/description_templates.html#creating-issue-templates) to provide guidance to the end user on what types of information should be captured for each issue type, and even to set labels automatically on the issue as it is created.\n\nGitLab makes project status reporting easy with the issue [health status](https://docs.gitlab.com/ee/user/project/issues/#health-status). Each issue can have a status of `On Track`, `Needs Attention`, or `At Risk`. The health statuses of all issues for an epic are reported within the epic details for a quick snapshot of the health of the overall epic.\n\nFinally, we have to define timeboxes to use for our planning cadences. We tend to use [milestones](https://docs.gitlab.com/ee/user/project/milestones/) for our mid-range planning, i.e., a quarterly development plan. Define the milestone at the highest group level that will be using that cadence, e.g., if the entire portfolio plans on a quarterly basis, then the planning milestone should be defined at the top-level group level. If each team of teams plans on a different mid-range cadence, then you would want to define separate milestones at each child group level. Note that milestones get added directly to issues, so the projects that will use the milestones must be within the group hierarchy where the milestone is defined. One other consideration is that an issue can only have a single milestone associated with it, so it’s a good idea to align on the best use of milestones across the Team of Teams before starting to use them.\n\nWe recently released our [iterations MVC](https://gitlab.com/groups/gitlab-org/-/epics/4012) in GitLab! This allows you to define, at the group or individual project level, short-term cadences that a team or set of teams uses for planning and tracking their work. 
While, as an MVC, iteration functionality is not yet as robust as milestones, we do have plans for enhancements including using iterations on boards, filtering issue lists by iteration, and burnup/burndown charts. You can view the epic [Iterations in GitLab](https://gitlab.com/groups/gitlab-org/-/epics/2422) to learn more about planned enhancements. And that doesn’t mean Kanban teams are out of luck. We innately support Kanban in GitLab, too, with issue boards, so you can have a mix of iteration based teams and continuous flow teams working together.\n\n## Agile PPM: putting it all together\n\nHere’s how the GitLab features come together to support Agile at scale to allow planning from the highest level down to the individual team, and to provide visibility, traceability, and reporting at each level:\n\n![](https://about.gitlab.com/images/blogimages/epic_hierarchy.png){: .medium.center}\n\nYou can also check out the video below to see how the structure comes together in GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/5J0bonGoECs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Read more about Agile at GitLab\n\n- [See more information about our Agile delivery solution](/solutions/agile-delivery/)\n- [Build your Agile roadmap in GitLab](https://docs.gitlab.com/ee/user/group/roadmap/)\n- [Learn how to create iterations](https://docs.gitlab.com/ee/user/group/iterations/)\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note}\n",[744,834,9,696],{"slug":1865,"featured":6,"template":699},"gitlab-for-agile-portfolio-planning-project-management","content:en-us:blog:gitlab-for-agile-portfolio-planning-project-management.yml","Gitlab For Agile Portfolio Planning Project 
Management","en-us/blog/gitlab-for-agile-portfolio-planning-project-management.yml","en-us/blog/gitlab-for-agile-portfolio-planning-project-management",{"_path":1871,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1872,"content":1878,"config":1885,"_id":1887,"_type":13,"title":1888,"_source":15,"_file":1889,"_stem":1890,"_extension":18},"/en-us/blog/gitlab-for-agile-software-development",{"title":1873,"description":1874,"ogTitle":1873,"ogDescription":1874,"noIndex":6,"ogImage":1875,"ogUrl":1876,"ogSiteName":685,"ogType":686,"canonicalUrls":1876,"schema":1877},"How to use GitLab for Agile software development","How Agile artifacts map to GitLab features and how an Agile iteration looks in GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097459/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2821%29_2pdp2MNB7SoP4MhhiI1WIa_1750097459157.png","https://about.gitlab.com/blog/gitlab-for-agile-software-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab for Agile software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"},{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2018-03-05\",\n      }",{"title":1873,"description":1874,"authors":1879,"heroImage":1875,"date":1881,"body":1882,"category":1506,"tags":1883,"updatedDate":1884},[739,1880],"Amanda Rueda","2018-03-05","Ever wondered if GitLab supports [Agile methodology](https://about.gitlab.com/solutions/agile-delivery/)? If you're considering using GitLab it might not be obvious how the DevSecOps platform's features correspond with Agile artifacts, so we've broken it down for you.\n\nAgile is one of the most important and transformative methodologies introduced to the software engineering discipline in recent decades. 
While not everyone can agree on the detailed terminology of Agile concepts, it has nonetheless made a significant positive impact on software development teams efficiently creating customer-centric products through [Agile software development](https://about.gitlab.com/topics/agile-delivery/) and delivery processes.\n\nGitLab is designed to be flexible enough to adapt to your software development methodology, whether Agile or influenced by it. In this post, we'll show a simple mapping of Agile artifacts to GitLab features, and explain how customers have successfully run high-performing [Agile software delivery teams](https://about.gitlab.com/solutions/agile-delivery/) with GitLab.\n\n## Mapping Agile artifacts to GitLab features\n\n### Agile artifact &#8594; GitLab feature\n\n- User story –> [Issues](https://docs.gitlab.com/ee/user/project/issues/)\n- Task –> [Tasks](https://docs.gitlab.com/ee/user/tasks.html)\n- Epic –> [Epics](https://docs.gitlab.com/ee/user/group/epics/)\n- Points and estimation –> [Issue weight](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html)\n- Product backlog –> [Issue boards](https://docs.gitlab.com/ee/user/project/issue_board.html)\n- Sprint/iteration –> [Iterations](https://docs.gitlab.com/ee/user/group/iterations/)\n- Agile board –> [Issue boards](https://docs.gitlab.com/ee/user/project/issue_board.html)\n- Team workload –> [Issue boards](https://docs.gitlab.com/ee/user/project/issue_board.html)\n- Burndown chart –> [Burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html)\n\n## An Agile iteration with GitLab\n\n### User stories &#8594; GitLab Issues\n\nIn Agile software development methodology, you often start with a user story that captures a single feature to deliver business value for users. In GitLab, an [issue](https://docs.gitlab.com/ee/user/project/issues/) serves this purpose with ease. 
GitLab Issues are essential for Agile teams, providing an effective method to manage tasks and projects. Software developers can create, assign, and track issues, ensuring clear accountability and progress visibility. Issues come with robust metadata such as assignee, iteration, weight, and labels, which enhances task prioritization and workflow management throughout the software development process. Additionally, team collaboration on issues is streamlined with discussion threads, attachments, and real-time updates, enabling effective communication and teamwork.\n\n![screenshot of a GitLab Issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097468/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097468371.png)\n\nThe GitLab Issue has a title and a description area in the middle, providing a space to document any details, such as the business value and relevant personas in a user story. The sidebar at the right provides integration with other Agile-compatible features like the epic parent that the issue belongs to, the iteration in which the issue is to be worked on, and the weight of the issue, reflecting the estimated effort.\n\n### Task &#8594; Tasks\n\nOften, a user story is further separated into individual tasks. GitLab [Tasks](https://docs.gitlab.com/ee/user/tasks.html) streamline project management by allowing Agile teams to break down user stories into discrete pieces of work. This feature supports the Agile framework by enabling software developers to create, assign, and track tasks within their projects. 
By integrating task management directly into GitLab, teams can maintain a cohesive workflow, ensuring all software development project activities are easily tracked and managed.\n\n![screenshot showing precise task management and project tracking using GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097468372.png)\n\nEnhance user value by enabling precise task management and project tracking using GitLab. Tasks are equipped with the same metadata as issues, including assignee, iteration, weight, labels, time tracking, and collaboration features. This comprehensive feature set allows Agile teams and project managers to manage workloads effectively, prioritize tasks, and ensure seamless collaboration among software developers.\n\n### Epics &#8594; GitLab Epics\nIn the other direction, some Agile practitioners specify an abstraction above user stories, often called an epic, that indicates a larger user flow consisting of multiple features. In GitLab, an [epic](https://docs.gitlab.com/ee/user/group/epics/) also contains a title and description, much like an issue, but it allows you to attach multiple child issues to it to indicate that hierarchy.\n\n![screenshot of nested GitLab Epics](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097468374.png)\n\nGitLab Epics allows Agile teams to organize and manage large projects efficiently by nesting epics up to nine layers deep. This hierarchical structure provides a clear view of the project's roadmap, helping software developers and project managers break down complex initiatives into manageable components. 
By utilizing child and [linked epics](https://docs.gitlab.com/ee/user/group/epics/linked_epics.html), teams can better track progress, dependencies, and project milestones, enhancing collaboration and ensuring cohesive agile delivery.\n\n### Product backlog &#8594; GitLab Issue Boards\n\nThe product or business owners typically create these user stories to reflect the needs of the business and customers. They are prioritized in a product backlog to capture urgency and desired order of development. The product owner communicates with stakeholders to determine the priorities and constantly refines the backlog.  In GitLab, an [issue board](https://docs.gitlab.com/ee/user/project/issue_board.html) organized with iterations as lists offers a drag-and-drop workflow experience that allows you to effortlessly prioritize your backlog and assign stories to an upcoming sprint.\n\n![Gif of GitLab Issue Board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/WIP_limit_aHR0cHM6_1750097468376.gif)\n\n### Sprints &#8594; GitLab iterations\n\nA sprint represents a finite time period in which the work is to be completed, which may be a week, a few weeks, or perhaps a month or more. The product owner and the development team meet to decide the work that is in scope for the upcoming sprint. GitLab's [iterations](https://docs.gitlab.com/ee/user/group/iterations/) feature supports this: Assign iterations a start date and a due date to capture the time period of the iteration. The team then puts issues into the sprint by assigning them to that particular iteration.\n\nBy using iterations, you leverage GitLab’s enhanced capabilities for Agile project management, providing better visibility and control over your Agile planning and delivery.\n\n### Points and estimation &#8594; GitLab issue weight\n\nAlso in this meeting, user stories are communicated, and the level of technical effort is estimated for each in-scope user story. 
In GitLab, issues have a [weight](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) attribute, which you would use to indicate the estimated effort.\n\nIn this meeting (or in subsequent ones), user stories are further broken down to technical deliverables, sometimes documenting technical plans and architecture. In GitLab, this information can be documented in the issue, or in the [merge request description](https://docs.gitlab.com/ee/user/project/merge_requests/), as the merge request is often the place where technical collaboration happens.\n\nDuring the sprint (GitLab iteration), software development team members pick up user stories to work on, one by one. In GitLab, issues have assignees. So you would [assign](https://docs.gitlab.com/ee/user/project/issues/multiple_assignees_for_issues.html) yourself to an issue to reflect that you are now working on it. We'd recommend that you [create an empty and linked-to-issue merge request](https://docs.gitlab.com/ee/user/project/issues/) right away to start the technical collaboration process, even before creating a single line of code.\n\n### Agile board &#8594; GitLab Issue Boards\n\nThroughout the sprint, issues move through various stages, such as `Ready for dev`, `In dev`, `In QA`, `In review`, `Done`, depending on the workflow in your particular organization. Typically these are columns in an Agile board. In GitLab, [issue boards](https://docs.gitlab.com/ee/user/project/issue_board.html) allow you to define your stages and enable you to move issues through them. The team can [configure the board](https://docs.gitlab.com/ee/user/project/issue_board.html#board-with-configuration) with respect to the iteration and other relevant attributes. 
During daily stand-ups, the team looks at the board together, to see the status of the sprint from a workflow perspective.\n\n![screenshot of GitLab Issue Board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097468378.png)\n\nThe GitLab Issue Board also pulls in issues dynamically, similar to the GitLab issue list. But it allows for more flexible workflows. You can set up individual lists in the board, to reflect Agile board stages. Your team can then control and track user stories as they move from for example, `Ready for dev`, all the way to `Released to production`.\n\n### Team workload &#8594; GitLab Issue Boards\n\nAgile teams can optimize their workflows by creating issue boards with lists scoped to assignees in GitLab. This feature allows you to visualize the distribution of tasks among team members, enhancing Agile delivery. To set it up, navigate to your project or group, create a new board in the \"Boards\" section, and [add lists](https://docs.gitlab.com/ee/user/project/issue_board.html#create-a-new-list) for each assignee. Assign issues to team members, and they will automatically appear in the corresponding lists. This dynamic view empowers balanced workloads and effective task management.\n\n![Screenshot of organized GitLab Issue Board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097468380.png)\n\nOrganize an issue board by assignee or by squad using [scoped labels]. GitLab’s Issue Board is incredibly diverse and supports workflows across the software development lifecycle.\n\n### Burndown charts &#8594; GitLab Burndown Charts\n\nThe development team wants to know if they are on track in real time, and mitigate risks as they arise. 
GitLab provides [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html), allowing the team to visualize the work scoped in the current sprint \"burning down\" as they are being completed.\n\nToward the end of the sprint, the development team demos completed features to various stakeholders. With GitLab, this process is made simple using [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/index.html) so that even code not yet released to production, but in various testing, staging or UAT environments can be demoed. Review Apps and [CI/CD features](https://docs.gitlab.com/ee/ci/) are integrated with the merge request itself.\n\nThese same tools are useful for Developers and QA roles to maintain software quality, whether through automated testing with CI/CD, or manual testing in a Review App environment.\n\n![Screenshot of GitLab Burndown Chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097469/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097468381.png)\n\nThe GitLab Burndown Chart allows a team to track scoped work \"burning down,\" as they are being completed in a sprint. This allows you to react to risks sooner and adapt accordingly, for example, informing your business stakeholders that certain features are anticipated to be delayed to a future sprint.\n\nTeam retrospectives at the end of the sprint can be documented in GitLab’s [wiki](https://docs.gitlab.com/ee/user/project/wiki/index.html), so that lessons learned and action items are tracked over time. During the actual retrospective, the team can look at the [iteration report](https://docs.gitlab.com/ee/user/group/iterations/#iteration-report), which displays the burndown chart and other statistics of the completed sprint.\n\n## Start your Agile journey with GitLab\nReady to elevate your Agile project management? 
GitLab offers a comprehensive suite of features tailored to Agile teams, software developers, and project managers, ensuring seamless collaboration and efficient workflows. Explore our pricing options, start a free trial and discover how GitLab can transform your Agile delivery processes.\n\n> [Learn more about GitLab Agile planning](https://about.gitlab.com/pricing/) and get started on your journey today!\n",[744,834,9,696],"2024-07-09",{"slug":1886,"featured":6,"template":699},"gitlab-for-agile-software-development","content:en-us:blog:gitlab-for-agile-software-development.yml","Gitlab For Agile Software Development","en-us/blog/gitlab-for-agile-software-development.yml","en-us/blog/gitlab-for-agile-software-development",{"_path":1892,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1893,"content":1899,"config":1905,"_id":1907,"_type":13,"title":1908,"_source":15,"_file":1909,"_stem":1910,"_extension":18},"/en-us/blog/gitlab-for-project-management-one",{"title":1894,"description":1895,"ogTitle":1894,"ogDescription":1895,"noIndex":6,"ogImage":1896,"ogUrl":1897,"ogSiteName":685,"ogType":686,"canonicalUrls":1897,"schema":1898},"How our tool fosters collaborative project management","Our marketing team explains how we use GitLab to manage complex projects. 
Read how GitLab can improve your collaboration on projects.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680908/Blog/Hero%20Images/stickynotes.jpg","https://about.gitlab.com/blog/gitlab-for-project-management-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How our tool fosters collaborative project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-06\",\n      }",{"title":1894,"description":1895,"authors":1900,"heroImage":1896,"date":1902,"body":1903,"category":832,"tags":1904},[1901],"Sara Kassabian","2019-12-06","\n\n_While it is true that there are few non-technical roles left in today’s business environment, it is notable that even folks outside of engineering use GitLab technology for collaborative project management. In this first part of our two-part series we outline the problems of siloed communications and how GitLab is structured to solve that for developers and everyone else. In part two, we’ll take a deep dive into how we used GitLab to manage an integrated marketing campaign and how our product marketing team uses GitLab for complex project management._\n\nImagine you’re trying to launch a new, integrated campaign. This campaign has a central message (e.g., \"Everyone can contribute\") and it pulls in representatives from many different teams – like social media, blogs, and field marketing – to create the designs and content that make this campaign a reality. 
The campaign structure is built and you’re ready to go – but wait – you’re working in a silo where communication between teams is challenging and there are strict rules about how information is conveyed.\n\nMarketing programs manager [Jackie Gragnola](/company/team/#jgragnola) kicked off the “GitLab for Non-Tech & Project Management Use\" breakout session at [GitLab Contribute New Orleans](/events/gitlab-contribute/) with an icebreaker game that mirrors this very conundrum. Breakout group participants were assigned teams as they tried to rebuild a gumdrop structure, but with strict communication guidelines. One person could see the structure, and relay what the structure looks like to three runners, who then described the structure to one builder.\n\nNeedless to say, the inefficiencies mounted quickly.\n\n\"The problem was one person could use their eyes, one person could use their mouth, one person could use their ears,\" said [Joyce Tompsett](/company/team/#Tompsett), analyst relations manager at GitLab and an observer/reporter in this game. \"So, even though everybody had all the component pieces they were only allowed to use one function at a time and then there was no return communication allowed.\"\n\nThe “can’t see the whole picture” problem is a common one in every industry and the solution is to make collaboration painless. [Collaboration is one of our core values at GitLab](https://handbook.gitlab.com/handbook/values/#collaboration) and it is fundamental to how we run our business and how we designed our tool. To understand how GitLab can work outside of software development it’s helpful to understand the underpinnings.\n\n## How GitLab works\n\nDeveloping software is similar in concept to baking a layer cake. You need a really strong foundation to keep your cake upright, and each coating of frosting between the cake layers acts as the glue that holds it all together. 
The top layer of frosting makes sure that all of your layers stay in one place (and makes sure that the layer cake is looking like a cake).\n\n![layercake](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/layercakev2.jpg){: .shadow.medium.center}\nA layer cake is a great analogy for how GitLab works as a project management tool.\n{: .note.text-center}\n\n\"The frosting between those layers is like webhooks or APIs; they’re actually the integrations that make the two pieces of software talk to each other,\" explains [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"Each task that's above the next one can get more complex because it's building off the foundation that you've already put into place.\"\n\nThe difference between the typical DevOps layer cake and the GitLab layer cake is that every activity or function fulfilled by a different layer of the cake (i.e., discrete piece of software) happens entirely within GitLab. In the GitLab layer cake, everything from project planning to execution allows teams to collaborate together within a single tool.\n\nOur description of the GitLab layer cake is actually how GitLab is structured today: With groups at the top, followed by epics, and projects that have issues, templates, etc. All of the layers can work together to build a fluid workflow, or they can be used independently.\n\n\"So all of those pieces together can actually standalone or you can put them all together and it makes a really awesome process in a workflow,\" says JJ. 
\"You can actually have lots of teams working together to get something massive done, but you've broken it down into little pieces.\"\n\n## Project management within GitLab\n\nIf you want to start thinking about getting \"something massive done\" within GitLab consider these basic steps:\n\n*   **Create a framework**: Before diving into a new project, a good project manager will first define what the ideal state is and will then build a framework for achieving this ideal state.\n*   **Assign directly responsible individuals (DRIs)**: The PM will assign DRIs to different components of the project. Each DRI is responsible for that particular component and is the person that you can follow-up with regarding that component throughout the project.\n*   **Templatize repeated tasks**: Keep things efficient with templates.\n*   **Set service level agreements (SLAs) at each handoff point**: Think about the due date and work backward to sort out how long different tasks should be taking.\n*   **Write rules of engagement and fallback instructions**\n*   **Define the feedback process**: Ensure that you have a place for people to ask questions, and make the room to iterate as you go along.\n\nWhat does this look like in the real world? Our marketing team built a project management structure within GitLab that allows multiple teams to collaborate within the [marketing group](https://gitlab.com/gitlab-com/marketing). Each team (e.g., [corporate marketing](https://gitlab.com/gitlab-com/marketing/corporate-marketing)) has their own project, where other groups and projects can live.\n\n[Epics](https://docs.gitlab.com/ee/user/group/epics/) – which represent projects that contain multiple issues – also live at the marketing group level rather than living within smaller team projects. The [epics live at the marketing group level](/handbook/marketing/#issues-milestones-and-epics) because oftentimes multiple marketing teams (e.g., corporate marketing, product marketing, etc.) 
will be tagged in different issues within a particular epic.\n\n[Efficiency](https://handbook.gitlab.com/handbook/values/#efficiency) is another one of our values at GitLab and the marketing team created templates within different marketing teams for repeat tasks to keep processes more uniform and efficient.\n\nWe also created a unified, global view that allows us to track the progress of various marketing projects. We have four labels: work in progress (wip), plan, review, and scheduled, that are assigned to a marketing issue that indicates the various stages. The labels allow [Todd Barr](/company/team/#tbarr), our chief marketing officer, and anyone else on the marketing team to see a global overview of various issues within marketing as they move from the idea to completion phase.\n\n![unifiedview](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/labels.png){: .shadow.large.center}\nA global overview of all the activities happening in marketing, separated and labeled according to their current status.\n{: .note.text-center}\n\nThe marketing team uses two-tiers for our epics: the highest level is the ancestor (formerly called \"parent\") epic, and below that is the child epic. 
There can be multiple issues associated with the child epic, but an issue can only be associated with one epic.\n\n![epic-diagram](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management/parent-child-epics.png){: .shadow.large.center}\nHow the marketing team uses ancestor epics and child epics.\n{: .note.text-center}\n\nNow that you understand the basics of GitLab and project management within GitLab, watch the video on executing sophisticated and integrated marketing programs.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAnd don’t miss the second part of this series where we put the spotlight on our internal successes using GitLab for project management.\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[722,9,696],{"slug":1906,"featured":6,"template":699},"gitlab-for-project-management-one","content:en-us:blog:gitlab-for-project-management-one.yml","Gitlab For Project Management One","en-us/blog/gitlab-for-project-management-one.yml","en-us/blog/gitlab-for-project-management-one",{"_path":1912,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1913,"content":1919,"config":1927,"_id":1929,"_type":13,"title":1930,"_source":15,"_file":1931,"_stem":1932,"_extension":18},"/en-us/blog/gitlab-gdk-remote-development",{"title":1914,"description":1915,"ogTitle":1914,"ogDescription":1915,"noIndex":6,"ogImage":1916,"ogUrl":1917,"ogSiteName":685,"ogType":686,"canonicalUrls":1917,"schema":1918},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to 
GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\",\n      }",{"title":1914,"description":1915,"authors":1920,"heroImage":1916,"date":1922,"body":1923,"category":832,"tags":1924},[1921],"Raimund Hook","2023-07-31","Open source is fundamental to GitLab. We believe that [everyone can\ncontribute](https://about.gitlab.com/company/mission/#mission).\n\nTypically, we recommend that anyone contributing anything more than basic\nchanges to GitLab run the [GitLab Development\nKit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). Because\ncontributors can't always meet the GDK's resource demands, we're working to\nenable GDK inside the cloud-based GitLab Remote Development workspaces.\n\n\nIn this article, I'll explain how I used a Remote Development workspace\nrunning in my Kubernetes cluster to make working with the GDK faster and\neasier.\n\n\n## A preliminary note\n\nFirst, keep in mind that as of this writing the [Remote Development\nworkspaces](https://about.gitlab.com/direction/create/ide/remote_development/)\nfeature is still in Beta. My example here is therefore very much a proof of\nconcept — and as such, it has some rough edges.\n\n\nBefore getting started, I followed the \"[Set up a\nworkspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\"\nprerequisites guide in the GitLab docs. 
For a more detailed set of\ninstructions, see Senior Developer Evangelist Michael Friedrich's tutorial\non [how to set up infrastructure for cloud development\nenvironments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n\n## Getting started with workspaces\n\nTo start using workspaces, you will need a project configured with a\n`.devfile.yaml`. GitLab team members have curated [a number of example\nprojects](https://gitlab.com/gitlab-org/remote-development/examples) you can\nreview.\n\n\nInitially, I tried to do this with a fork of the GitLab project itself, but\nI ran into [some\nissues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the\nworkspace begins cloning the repository.\n\n\nTo figure out what was causing my problems, I looked more closely at what\nhappens behind the scenes when a workspace is created.\n\n\n## Behind the scenes with Remote Development workspaces\n\nWhen you create a new workspace, the following happens:\n\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster.\nThe agent dynamically generates a name for and assumes management of the\nnamespace.\n\n1. Inside the namespace, a new deployment is created, specifying the\ncontainer you chose in your `.devfile.yaml` as the image to use.\n\n1. This deployment is configured with some [init\ncontainers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)\nthat perform some actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the\nworkspace becomes available.\n\n\n## The clone problem\n\nWhen cloning a repository, `git` tends to do much of the work in memory.\nThis can be a challenge on larger projects/repositories, as it can require\nsignificant amounts of RAM. When cloning the GitLab project, for instance,\ngit consumes approximately 1.6GB of RAM. 
This number is only going to\nincrease with time. Sure, strategies like [shallow\nclones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt)\ncan help reduce this, but these are perhaps less suited to active use by a\ndeveloper as they can increase the amount of time required to perform\nongoing git operations.\n\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the\nGitLab project failed for this reason. The init container performing the\nclone is currently hard-limited to 128MiB of RAM, after which the memory\nmanagement processes on the node kill the container.\n\n\nTo overcome this limitation, move the `.devfile.yaml` into the a fork of the\nroot of the GDK repository. This project clones more quickly (and does so\nusing fewer resources), so it's a  perfect starting point for running GDK\nitself. Another (bonus) advantage: You're then primed to contribute to the\nGDK itself, in addition to any of the other GitLab projects that the GDK\nclones.\n\n\n## Components of a GDK installation\n\nGDK clones the following projects from the GitLab 'family':\n\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\n\nThis allows you to work on any items in those directories as a part of your\n\"live\" installation.\n\n\n## Getting GDK installed and running in a workspace\n\nOnce I had a workspace up and running, my next step was to get GDK installed\nand running *in* that workspace. The GDK's documentation presents [several\nroutes for doing\nthis](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\n\nA complete installation can take some time, as GDK needs to bootstrap itself\nand install a number of prerequisites. 
This is less than ideal in the\ncontext of a Remote Development workspace, as one of remote development's\nprimary benefits is enabling access to a development environment rapidly.\nRequiring a user to bootstrap an environment that takes 50 minutes (or\nlonger) doesn't help achieve this goal.\n\n\nTo combat this, I built a container image that effectively bootstraps and\ninstalls GDK, pre-building the GDK prerequisites and pre-seeding the\ndatabase. This image and its associated tooling are currently [in\nreview](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n\n## Pre-building\n\nPre-building the container and running the bootstrap process on a scheduled\nbasis allows us to perform that process once, without requiring the user to\nwait for something that can essentially be \"pre-canned\" for their use.\n\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK\nenvironment with the latest version of our GitLab repository, but this step\ndoesn't take quite as long as a complete bootstrap.\n\n\n## Generating a gdk.yml file\n\nTo work properly, GDK also requires a [`gdk.yml`\nfile](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml).\nThis file tells GDK how to configure GitLab to return the correct URLs and\nother items. To get GDK running in Remote Development, Rails needs to return\nURLs in a certain scheme (otherwise your browser won't know where to\nconnect). To help this along, we [inject an environment\nvariable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the\nworkspace container. 
This variable helps us determine the URL in use (which\nis dynamically generated for each workspace).\n\n\nWe [now have a\nscript](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads)\nin GDK that will generate your `gdk.yml` file based on your workspace.\n\n\n## Creating our devfile\n\nThe contents of my `.devfile.yaml` looks like this:\n\n\n```yaml\n\nschemaVersion: 2.2.0\n\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\n\nThis definition comes straight out of the [Workspace\ndocs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a\nnumber of ports that GDK uses. 
(For now, I've only tested the port\n`gdk-3000`, which is the the link to our instance of GDK.)\n\n\n## From Workspace to GDK\n\nOnce we have a project with a `.devfile.yaml`, our final step is to [create\na new\nworkspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\n\nAs a part of this step, your cluster will pull the image as defined in the\n`.devfile.yaml` and start it up. For the GDK image we pre-built, this can\ntake a few minutes.\n\n\nOnce the workspace is ready, the last step is to follow the link from the UI\nto connect to the workspace. This will open up a familiar VS Code IDE, with\nour GDK fork checked out.\n\n\nBut wait, where's GDK?\n\n\nWell, the pre-build did most of the work for us, but we still need to take a\nfew final steps before we can claim that GDK is up and running. These have\nbeen built into a script we can run from the integrated terminal within the\nworkspace.\n\n\nTo open a terminal, we can click on the VS Code Hamburger menu (top left),\nnavigate to `Terminal` and select `New Terminal`.\n\n\nNow we execute the following script, which completes the setup and copies a\ncouple of files over from the pre-built folders:\n\n\n```shell\n\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n\n```\n\n\nThis can take up to 15 minutes, but when it's done it should output the\nmagic words — something like the following (note the 3000 in the URL; we\nspecified that in the `.devfile.yaml` earlier):\n\n\n```shell\n\nSuccess! You can access your GDK here:\nhttps://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n\n```\n\n\n## Connect to your GDK\n\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple\nof moments (GDK boot time), you should reach a familiar GitLab login screen.\n\n\nCongratulations! 
GDK is now running inside your Remote Development\nworkspace.\n\n\nTo log in, type `gdk` in your terminal and you'll see the default admin\ncredentials displayed near the bottom:\n\n\n```shell\n\n# Development admin account: xxxx / xxxx\n\n\nFor more information about GitLab development see\n\nhttps://docs.gitlab.com/ee/development/index.html.\n\n```\n\n\nLog into your GDK with the default credentials, change the admin user\npassword, and you're all set!\n\n\n## Demo of workspace launch\n\nHere's a demo of launching a workspace in my personal cluster:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## How to contribute to GitLab\n\nIn this article I explained how to get GDK up and running in Remote\nDevelopment workspaces. This is not without its challenges, but the end\nresult should mean that contributing to GitLab (especially in\nresource-constrained environments) is quicker and easier.\n\n\nDo you want to contribute to GitLab? Come and join in the conversation in\nthe `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab),\nor just pop in and say \"hello.\"\n\n\n_Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. 
The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab._\n",[1035,722,9,1925,1926,721],"cloud native","contributors",{"slug":1928,"featured":6,"template":699},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":1934,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1935,"content":1940,"config":1947,"_id":1949,"_type":13,"title":1950,"_source":15,"_file":1951,"_stem":1952,"_extension":18},"/en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant",{"title":1936,"description":1937,"ogTitle":1936,"ogDescription":1937,"noIndex":6,"ogImage":803,"ogUrl":1938,"ogSiteName":685,"ogType":686,"canonicalUrls":1938,"schema":1939},"Gartner names GitLab visionary in enterprise agile planning","For the second consecutive year, Gartner validates our product vision.","https://about.gitlab.com/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab named a Visionary in 2020 Gartner Magic Quadrant for Enterprise Agile Planning Tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cormac Foster\"}],\n        \"datePublished\": \"2020-08-03\",\n      }",{"title":1941,"description":1937,"authors":1942,"heroImage":803,"date":1944,"body":1945,"category":1014,"tags":1946},"GitLab named a Visionary in 2020 Gartner Magic Quadrant for Enterprise Agile Planning Tools",[1943],"Cormac Foster","2020-08-03","\nGitLab was recently named a 'Visionary' by Gartner in their 2020 Magic Quadrant for Enterprise Agile Planning Tools. 
We're pleased to be recognized once again, despite being a fairly new entrant into the space.\n\nAs we [build toward lovability](/direction/maturity/) over the next year, we're excited to be recognized by industry experts like Gartner. While we continue to increase our breadth and depth, we also plan to double down on the unique benefits our single-application approach provides.\n\nIn the video below, [Justin Farris](/company/team/#justinfarris), group manager of Plan, lays out the team's long-term vision, including our five [jobs to be done](/direction/dev/#plan-1):\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube-nocookie.com/embed/bT60rJEoWhw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nWe recently released a suite of enhanced work planning and management features in [GitLab 13.2](/releases/2020/07/22/gitlab-13-2-released/), with a lot more to come. Over the next twelve months, we plan to focus on three core areas:\n\n### Building a world class Agile planning experience\n\n> \"Agile is the dominant means of creating software today because it enables organizations\n> to respond to change quickly, to learn rapidly, and to deliver continuously. Making use of\n> agile practices at scale is essential to digital business success.\" Gartner, Magic Quadrant\n> for Enterprise Agile Planning Tools\n\nWe agree. In its many forms, [Agile](/solutions/agile-delivery/) is the way forward for modern business. We can't be everything to everyone, and we don't want to recreate the same bloated project management solutions enterprises have been using for years. But we **do** want to be the best solution for managing Agile projects and portfolios that you can use to take your business forward.\n\nTo that end, we're focused on delivering solutions that help you elevate your Agile planning from project management to portfolio planning, regardless of industry or tool choice. 
We recently released a [requirements management](https://docs.gitlab.com/ee/user/project/requirements/) feature, which will open opportunities to use GitLab for entirely new businesses, and we launched a vastly improved [Jira import process](https://docs.gitlab.com/ee/user/project/import/jira.html) to make it easier to transition to a GitLab workflow. While we continue to iterate on both of those, we'll also be improving the overall management experience with easier-to-use Kanban boards, [enhanced portfolio and group roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/), and more robust epics and milestones.\n\n### Visibility and value stream management\n\nOf course, you can't plan without data, so visibility is another key driver of our roadmap. [Value stream management](/solutions/value-stream-management/) is a hot topic these days. To many, it's a refocusing of decades-old [value stream mapping](https://en.wikipedia.org/wiki/Value-stream_mapping) techniques to the software development lifecycle, measuring value added throughout the software development process and identifying inefficiencies that might keep you from delivering more.\n\nMeasurement is an essential part of the process, but at GitLab, we can also help you close the loop and take action – the \"management\" of that value stream. 
As a single application for the [DevOps lifecycle](/topics/devops/), GitLab has the unique ability to help you discover process bottlenecks, drill into the sources of waste for the root cause, and actually make changes to address them, whether that's reassigning an MR, mentioning someone to unblock the issue, or committing code changes.\n\nIn recent releases, we unlocked more flexible value stream workflows with [customizable value stream analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html#customizable-value-stream-analytics) and surfaced value metrics to more personas with [compliance](https://docs.gitlab.com/ee/user/compliance/compliance_report/index.html) and [security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#instance-security-dashboard). In the coming months, we'll continue to enhance our drill-down reporting and resolution, focusing on additional value metrics, additional dashboards, and automated recommendations for action.\n\n### Our customers\n\nOf course, as happy as we are to be recognized by Gartner, our users are the most important source of product guidance. At GitLab, everyone can contribute, and we wouldn't be the same company without the active participation of our users. That's why we've made our [maturity plan](/direction/maturity/) and [product vision](/direction/#vision) public and open for comment. 
For more information about enterprise Agile Planning in the coming year, please read our [FY21 Plan](/direction/dev/#fy21-plan-whats-next-for-dev)—and let us know what you think!\n\n### Related links\n\n* [2020 Magic Quadrant for Enterprise Agile Planning Tools (available to Gartner subscribers)](https://www.gartner.com/document/3983813?ref=solrAll&refval=255086013)\n* [We're dogfooding a tool to help visualize high-level trends in GitLab projects](/blog/insights/)\n* [How Marketing uses GitLab to manage complex projects](/blog/gl-for-pm-prt-2/)\n\n_Gartner \"Magic Quadrant for Enterprise Agile Planning Tools,\" Keith Mann, et al, 21 April 2020\nGartner does not endorse any vendor, product or service depicted in its research publications and does not advise technology users to select only those vendors with the highest ratings or other designation. Gartner research publications consist of the opinions of Gartner’s Research & Advisory organization and should not be construed as statements of fact. 
Gartner disclaims all warranties, expressed or implied, with respect to this research, including any warranties of merchantability or fitness for a particular purpose._\n",[744,9,790],{"slug":1948,"featured":6,"template":699},"gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant","content:en-us:blog:gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant.yml","Gitlab Named Visionary In Gartner Agile Planning Magic Quadrant","en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant.yml","en-us/blog/gitlab-named-visionary-in-gartner-agile-planning-magic-quadrant",{"_path":1954,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1955,"content":1961,"config":1967,"_id":1969,"_type":13,"title":1970,"_source":15,"_file":1971,"_stem":1972,"_extension":18},"/en-us/blog/gitlab-security-tools-and-the-hipaa-risk-analysis",{"title":1956,"description":1957,"ogTitle":1956,"ogDescription":1957,"noIndex":6,"ogImage":1958,"ogUrl":1959,"ogSiteName":685,"ogType":686,"canonicalUrls":1959,"schema":1960},"GitLab's security tools and the HIPAA risk analysis","A closer look at GitLab’s security scanning tools and the HIPAA risk analysis.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680548/Blog/Hero%20Images/gitlab-security-and-hipaa-risk-analysis.jpg","https://about.gitlab.com/blog/gitlab-security-tools-and-the-hipaa-risk-analysis","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's security tools and the HIPAA risk analysis\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Luka Trbojevic\"}],\n        \"datePublished\": \"2019-04-10\",\n      }",{"title":1956,"description":1957,"authors":1962,"heroImage":1958,"date":1964,"body":1965,"category":787,"tags":1966},[1963],"Luka Trbojevic","2019-04-10","\n\nThe importance of the HIPAA risk analysis (45 CFR § 164.308(a)(1)(ii)(A)) can’t be overstated.\nThe Office for Civil Rights (OCR) 
announced 2018 was an [all-time record year for HIPAA enforcement](https://www.hhs.gov/hipaa/for-professionals/compliance-enforcement/agreements/2018enforcement/index.html),\nand an incomplete risk analysis or inadequate follow-up on findings were cited in three of the major breaches.\n\nDigitization of healthcare is moving faster than ever. From patient portals to patient-reported\noutcomes platform, there’s an application for just about everything. But as we adjust our pace\nof building and innovating in this digital healthcare era, we must quickly recalibrate our pace\nof identifying risks and vulnerabilities in our software.\n\nYou may already know, GitLab is a single tool for the entire DevOps lifecycle, from project planning\nto deployment. But it’s also a powerful security tool that can add automated vulnerability scanning\nto your development process.\n\nLet’s take a closer look.\n\n## Using Static Application Security Testing to identify vulnerabilities in source code\n\nUsing [Static Application Security Testing](https://docs.gitlab.com/ee/user/application_security/sast/)\n(SAST), you can identify vulnerabilities in your source code. 
Setting up SAST is easy – you can\neither include the [SAST CI job](https://docs.gitlab.com/ee/user/application_security/sast/) or use\n[Auto SAST](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-sast).\nAfter that’s done, and every time the job is run, your source code will be scanned.\nWhen the scan is done, the results are [displayed right on the merge request](https://docs.gitlab.com/ee/user/application_security/sast/#how-it-works).\nAnd when you go to any pipeline with a SAST job, you’ll be shown a [security report](https://docs.gitlab.com/ee/user/application_security/sast/#security-report-under-pipelines) with the findings.\n\n## Using Dynamic Application Security Testing to identify vulnerabilities in web applications\n\nUnlike SAST, which scans source code for vulnerabilities, [Dynamic Application Security Testing](https://docs.gitlab.com/ee/user/application_security/dast/) (DAST) analyzes\nrunning web applications for vulnerabilities. It’s just as simple to set up as SAST – simply add\na DAST CI/CD job to your pipeline. DAST will also [display the findings directly in the merge request](https://docs.gitlab.com/ee/user/application_security/dast/#how-it-works)\nand create a [report artifact](https://docs.gitlab.com/ee/ci/yaml/#artifactsreportsdast).\n\n## Container Scanning\n\nIf you use Docker, you can use [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\nto scan your Docker images for vulnerabilities. This is again as simple as adding a [Container Scanning CI/CD job](https://docs.gitlab.com/ee/user/application_security/container_scanning/#configuring-with-templates) to your pipeline!\nThe scan will generate a [report artifact](https://docs.gitlab.com/ee/ci/yaml/#artifactsreportscontainer_scanning) you can download and review.\n\n## Secret Detection\n\nThe risk analysis standard requires both risks and vulnerabilities. 
One common risk is for secrets\n(API keys and passwords, for example) to be inadvertently leaked. To address that problem,\nwe’re working on [Secret Detection](https://gitlab.com/groups/gitlab-org/-/epics/675).\nIt’ll check files and configurations to identify potentially sensitive information, running every\ntime a commit is pushed to a branch.\n\n## Coming soon: Even more tools to assess risks and vulnerabilities\n\nIn the coming year we’ll be adding a number of product categories to our Secure stage to help\nimprove your application’s security and find more vulnerabilities. Here’s what you can look forward to:\n\n### Digging deeper for application vulnerabilities: Interactive Application Security Testing\n\n[Interactive Application Security Testing](https://gitlab.com/groups/gitlab-org/-/epics/344) (IAST)\nassesses an application’s response to an external security scan (like DAST) to identify vulnerabilities\nthat wouldn’t be caught by just the external scan. When this feature is complete, it’ll add yet\nanother layer of vulnerability detection to DAST.\n\n### Fuzzing\n\nAnother way to find application vulnerabilities is to generate random inputs and send them\nto the application. By doing this, you can find unintended behaviors in the application that\nmay result in a vulnerability. 
While fuzzing is often a niche technique, we’re\n[working on adding basic fuzzing capability straight into GitLab](https://gitlab.com/groups/gitlab-org/-/epics/818)!\n\n## Putting it all together\n\nToday, with GitLab, you can:\n\n* Identify vulnerabilities in your source code using SAST.\n* Identify vulnerabilities in your web application using DAST.\n* Identify vulnerabilities in your Docker containers using Container Scanning.\n* Scan for passwords, API keys, and other sensitive information with Secrets Detection.\n\nIn the near future, with GitLab, you’ll be able to:\n\n* Identify vulnerabilities in your application using IAST.\n* Identify vulnerabilities in your application with fuzzing.\n\n## Closing thoughts\n\nWhether you’re a four-person startup making the next groundbreaking healthcare analytics platform,\nor an academic medical center developing health applications, having security visibility where\nit didn’t exist previously is a good thing. And having that visibility incorporated directly into\nyour development process with minimal work and seamless integration is even better.\n\nWith GitLab’s security features you can incorporate automated vulnerability detection straight\ninto your development process. While the risk analysis requirement goes beyond just the software\nyou’re writing, as you write more code faster, automating part of the software security portion can only help.\n\n### Disclaimer\n\nTHE INFORMATION PROVIDED ON THIS WEBSITE IS TO BE USED FOR INFORMATIONAL PURPOSES ONLY. THE INFORMATION SHOULD NOT BE RELIED UPON OR CONSTRUED AS LEGAL OR COMPLIANCE ADVICE OR OPINIONS. THE INFORMATION IS NOT COMPREHENSIVE AND WILL NOT GUARANTEE COMPLIANCE WITH ANY REGULATION OR INDUSTRY STANDARD. 
YOU MUST NOT RELY ON THE INFORMATION FOUND ON THIS WEBSITE AS AN ALTERNATIVE TO SEEKING PROFESSIONAL ADVICE FROM YOUR ATTORNEY AND/OR COMPLIANCE PROFESSIONAL.\n{: .note}\n\nCover image by [rawpixel.com](https://www.pexels.com/@rawpixel?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels) on [Pexels](https://www.pexels.com/photo/clinician-writing-medical-report-1919236/?utm_content=attributionCopyText&utm_medium=referral&utm_source=pexels)\n{: .note}\n",[787,835,9],{"slug":1968,"featured":6,"template":699},"gitlab-security-tools-and-the-hipaa-risk-analysis","content:en-us:blog:gitlab-security-tools-and-the-hipaa-risk-analysis.yml","Gitlab Security Tools And The Hipaa Risk Analysis","en-us/blog/gitlab-security-tools-and-the-hipaa-risk-analysis.yml","en-us/blog/gitlab-security-tools-and-the-hipaa-risk-analysis",{"_path":1974,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1975,"content":1981,"config":1986,"_id":1988,"_type":13,"title":1989,"_source":15,"_file":1990,"_stem":1991,"_extension":18},"/en-us/blog/gitlab-value-stream-analytics",{"title":1976,"description":1977,"ogTitle":1976,"ogDescription":1977,"noIndex":6,"ogImage":1978,"ogUrl":1979,"ogSiteName":685,"ogType":686,"canonicalUrls":1979,"schema":1980},"The role of Value Stream Analytics in GitLab's DevOps Platform","Better DevOps teams start with value stream management. 
Here's how to get the most out of GitLab's Value Stream Analytics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668041/Blog/Hero%20Images/Understand-Highly-Technical-Spaces.jpg","https://about.gitlab.com/blog/gitlab-value-stream-analytics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The role of Value Stream Analytics in GitLab's DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2022-01-24\",\n      }",{"title":1976,"description":1977,"authors":1982,"heroImage":1978,"date":1983,"body":1984,"category":693,"tags":1985},[1340],"2022-01-24","\n\n***\"Whenever there is a product for a customer, there is a value stream. The challenge lies in seeing it!\"*** *Learning to See - Shook & Rother*\n\nEvery company today is a software company so the level of innovation and delivery has a direct impact on revenue generation. In order to be successful, businesses must deliver an amazing digital experience, keep up with the latest technologies, deliver value at the speed demanded by customers, and do it all with zero tolerance for outages or security breaches. That's where value stream management comes into play.\n\n*“If you can’t describe what you are doing as a value stream, you don’t know what you’re doing.”* *(Martin, K. & Osterling, M. (2014). Value Stream Mapping. McGraw-Hill, p. 15.)*\n\nValue stream management(VSM) is a change in development mindset that puts the customer at the center. VSM allows teams to measure and improve the software delivery and value flow to customers. The development process is outlined from ideation until customer value realization. 
The focus is no longer on features and functionality – instead, organizations ensure the efforts and resources invested to deliver value to customers will improve flows that are causing bottlenecks, optimizing the cycle and shortening time to market. \n\nYou can learn more on [Value Stream Mapping](/topics/devops/value-stream-mapping/) here\n\n## An overview of GitLab's Value Stream Analytics \n\nAs part of [GitLab's DevOps Platform](/solutions/devops-platform/), Value Stream Analytics provides one shared view of the team's velocity. With insights into how long it takes the team to move from planning to monitoring, it's possible to pinpoint areas for improvement. Value Stream Analytics measures the time spent for each project or group. It displays the median time spent in each stage of the process by measuring from its start event to its end event. It helps identify bottlenecks in the development process, enabling management to uncover, triage, and identify the root cause of slowdowns in the software development life cycle and to quickly act on them to improve efficiency.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_1.png)\n\n## Why are Value Stream Analytics important? \n\nThe process of efficient software delivery starts by understanding where the slowest parts are, and what are the root causes behind them. With this information it's possible to build a plan for optimization.  \n\n## Which DevOps stages are tracked? \n\nThe stages tracked by Value Stream Analytics by default represent GitLab's DevOps Platform flow - \n**Issue**, **Plan**, **Code**, **Test**, **Review** and **Staging**.  \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_stages.png)\n\n## How to customize GitLab's Value Stream Analytics \n\nNote: The stages can be customized in group evel Value Stream Analytics; currently no customization is available in the project level. 
\n\nClick Edit in the Value Stream Management \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_4.png)\n\nClick Add another stage \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_5.png)\n\nDefine stage name, and select start event and end event from the list. \n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_6.png)\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_7.png)\n\n## The key metrics \n\nThe dashboard includes useful key metrics which help to understand the team performance. If, for example, the values of **new issues**, **commits** and **deploys** are high, it's clear a team is productive. The DevOps metrics commonly known as the **DORA (DevOps Research and Assessment) 4**. The [DORA 4 metrics](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance) show the value the team delivered to customers.\n\n**Deployment Frequency** shows how often code is deployed to production and brings value to end users. **Lead time for changes** measures how long it takes a change to get into production. Like deployment frequency, this metric measures team velocity.\n\n![vsa](https://about.gitlab.com/images/blogimages/vsa/vsa_metrics.png)\n\n## The importance of Value Stream Analytics within GitLab\n\nGitLab is a complete DevOps Platform, delivered as a single application. As such, teams use the same application during the development process from planning to monitoring. One of the benefits of being a single application for the entire DevOps lifecycle is that the data flows from all DevOps stages and is available for analysis, so Value Stream Analytics correlates and identifies how teams are spending their time without the need to integrate with an external tool. 
\n\nLearn more about [Value Stream Analytics for projects](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) and [Value Stream Analytics for groups](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nTake a deeper dive into what DORA calls [elite DevOps teams](/blog/how-to-make-your-devops-team-elite-performers/).\n\n\n\n\n\n\n\n\n\n\n",[722,1074,9],{"slug":1987,"featured":6,"template":699},"gitlab-value-stream-analytics","content:en-us:blog:gitlab-value-stream-analytics.yml","Gitlab Value Stream Analytics","en-us/blog/gitlab-value-stream-analytics.yml","en-us/blog/gitlab-value-stream-analytics",{"_path":1993,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1994,"content":1999,"config":2005,"_id":2007,"_type":13,"title":2008,"_source":15,"_file":2009,"_stem":2010,"_extension":18},"/en-us/blog/gitlab-value-stream-management-and-dora",{"title":1995,"description":1996,"ogTitle":1995,"ogDescription":1996,"noIndex":6,"ogImage":1065,"ogUrl":1997,"ogSiteName":685,"ogType":686,"canonicalUrls":1997,"schema":1998},"Improving visibility: GitLab's value stream and DORA metrics","Optimize DevOps with the new DORA metrics in GitLab Value Stream Management.","https://about.gitlab.com/blog/gitlab-value-stream-management-and-dora","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2022-06-20\",\n      }",{"title":2000,"description":1996,"authors":2001,"heroImage":1065,"date":2002,"body":2003,"category":693,"tags":2004},"Break the black box of software delivery with GitLab Value Stream Management and DORA Metrics",[1660],"2022-06-20","\n\nOur customers frequently tell us that despite being very effective DevOps practitioners, they still struggle to build a data-driven 
DevOps culture. They find it especially hard to answer the fundamental question:\n\n_What are the right things to measure?_\n\nThis becomes more challenging in enterprise organizations when there are hundreds of different development groups, and there's no normalization between how things are done or measured. Because of this, we see a strong interest from customers for metrics that would allow them to standardize between teams and benchmark themselves against the industry.\n\n![Value Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-overview.png){: .shadow}\nValue Streams Analytics helps you visualize and manage the DevOps flow from ideation to customer delivery.\n{: .note.text-center}\n\n## What Are DORA Metrics? \n\nWith the continued acceleration of digital transformation, most organizations realize that technology delivery excellence is a must for long-term success and competitive advantage. After seven years of data collection and research, the [DORA's State of DevOps research program](https://www.devops-research.com/research.html) has developed and validated four metrics that measure software delivery performance: [(1) deployment frequency, (2) lead time for changes, (3) time to restore service and (4) change failure rate.](https://docs.gitlab.com/ee/user/analytics/#devops-research-and-assessment-dora-key-metrics) \n\nIn GitLab, The One DevOps Platform, [Value Stream Analytics (VSA)](/solutions/value-stream-management/) surfaces a single source of insight for each stage of the software development process. 
The analytics are available out of the box for teams to drive performance improvements.\n\n## What does DORA bring to Value Stream Analytics?\n\nValue Stream Analytics (VSA) measures [the entire journey from customer request to release](https://docs.gitlab.com/ee/user/group/value_stream_analytics/) and automatically displays the overall performance of the stream. Each stage in the value stream is transparent and compliant in a shared experience for everyone in the company. \n\nThis makes the VSA the single source of truth (SSoT) about what's happening within the entire software supply chain, with DORA’s metrics as the key measure of the value stream outputs. \n\n## How do Value Stream Analytics work?\n\nValue stream analytics measures the median time spent by issues or merge requests in each development stage.\n\nAs an example, a stage might begin with the addition of a label to an issue and end with the addition of another label:\n\n![Value stream analytics measures each stage from its start event to its end event.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-stage.png){: .shadow}\nValue stream analytics measures each stage from its start event to its end event.\n{: .note.text-center}\n\nFor each stage, a table list displays the workflow items filtered in the context of that stage. 
[In stages based on labels](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#label-based-stages-for-custom-value-streams), the table will list Issues, and in stages based on Commits, it will list MRs:\n\n![The VSA MR table provides a deeper insight into stage time breakdown .](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-mr.png){: .shadow}\nThe VSA MR table provides a deeper insight into stage time breakdown.\n{: .note.text-center}\n\nThe tables provide a deep dive into the stage performance and allow users to answer questions such as:\n\n- How to easily see bottlenecks that are slowing down the delivery of value to customers?\n- How to reduce the time spent in each stage so I can deliver features faster and stay competitive? \n- How can we develop code faster?\n- How can we hand off to QA faster?  How can we push changes to Production more quickly?\n\nUsing the Filter results text box, you can filter by a project (example below) or parameter (e.g., Milestone, Label). \n\n![Value stream analytics filtering.](https://about.gitlab.com/images/blogimages/2022-06-dora-vsa-filter.png){: .shadow}\nValue stream analytics filtering.\n{: .note.text-center}\n\nNo login is required to view [Value stream analytics for projects](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics) where you can become familiar with stream filtering, default stages and deep-dive tables. For a full view of the DORA metrics, you have to log in with your GitLab [Ultimate-tier](https://about.gitlab.com/pricing/) account or sign up for a [free trial](https://about.gitlab.com/free-trial/).\n\n## How to understand DevOps maturity and benchmark progress with the DORA metrics?\n\nDORA metrics can also provide answers to questions not related to VSA, such as:\n\n- How to become an elite team of DevOps professionals?\n- How do I perform vs. industry standards? 
\n- Is the organization better at DevOps this year than last?\n\n## Learn more about VSA and DORA:\n\n- Check out the GitLab Speed Run about DORA metrics in VSA:\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/wQU-mWvNSiI\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n- [GitLab DORA metrics API documentation](https://docs.gitlab.com/ee/api/dora/metrics.html)\n\n- [Step-by-step instructions for creating a custom value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-gitlab-default-stages)\n",[744,722,834,1074,9],{"slug":2006,"featured":6,"template":699},"gitlab-value-stream-management-and-dora","content:en-us:blog:gitlab-value-stream-management-and-dora.yml","Gitlab Value Stream Management And Dora","en-us/blog/gitlab-value-stream-management-and-dora.yml","en-us/blog/gitlab-value-stream-management-and-dora",{"_path":2012,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2013,"content":2019,"config":2026,"_id":2028,"_type":13,"title":2029,"_source":15,"_file":2030,"_stem":2031,"_extension":18},"/en-us/blog/gitlab-workflow-with-jira-jenkins",{"title":2014,"description":2015,"ogTitle":2014,"ogDescription":2015,"noIndex":6,"ogImage":2016,"ogUrl":2017,"ogSiteName":685,"ogType":686,"canonicalUrls":2017,"schema":2018},"Demo: GitLab + Jira + Jenkins","See how you can use our Jira and Jenkins integrations to reduce context switching and streamline your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680048/Blog/Hero%20Images/gitlab-jira-jenkins-cover.png","https://about.gitlab.com/blog/gitlab-workflow-with-jira-jenkins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Demo: GitLab + Jira + Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joel Krooswyk\"}],\n        
\"datePublished\": \"2018-07-30\",\n      }",{"title":2014,"description":2015,"authors":2020,"heroImage":2016,"date":2022,"body":2023,"category":300,"tags":2024},[2021],"Joel Krooswyk","2018-07-30","\n\nOne of the things we love about GitLab is that while it can replace all your other software development lifecycle tools [(no, really)](/); it doesn't have to. Whether you want to rip and replace everything or use it for one or two stages of your workflow, [alongside your existing toolset](/partners/technology-partners/integrate/) (for now, or forever), we've got you covered.\n\nOne of the things we're most often asked about is how GitLab works together with [Jira](/solutions/jira/) for issue tracking, and [Jenkins](/solutions/jenkins/) for CI. This could be for one of two reasons:\n\n1. Your organization is happy with your issue tracking and CI solutions, and just want to use GitLab for other features, or\n2. You plan to move to GitLab for your end-to-end software development lifecycle, but that's a significant undertaking and it may be less disruptive to migrate on a project-by-project basis.\n\nNo matter the reason, what's important is maintaining the context of work without having to switch between applications frequently. With these integrations you can transition Jira issue states via GitLab, as well as see GitLab commits, branches, and merge requests in the Jira development panel. 
You can also view the status of Jenkins pipelines in GitLab to optimize your use of GitLab Merge Requests.\n\nI recorded this demo to show what a workflow using all three would look like.\n\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Jn-_fyra7xQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[2025,974,232,9],"CI",{"slug":2027,"featured":6,"template":699},"gitlab-workflow-with-jira-jenkins","content:en-us:blog:gitlab-workflow-with-jira-jenkins.yml","Gitlab Workflow With Jira Jenkins","en-us/blog/gitlab-workflow-with-jira-jenkins.yml","en-us/blog/gitlab-workflow-with-jira-jenkins",{"_path":2033,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2034,"content":2040,"config":2045,"_id":2047,"_type":13,"title":2048,"_source":15,"_file":2049,"_stem":2050,"_extension":18},"/en-us/blog/gitlab-zapier-integration",{"title":2035,"description":2036,"ogTitle":2035,"ogDescription":2036,"noIndex":6,"ogImage":2037,"ogUrl":2038,"ogSiteName":685,"ogType":686,"canonicalUrls":2038,"schema":2039},"There's a Zap for that. Automate your workflows with GitLab + Zapier","With Zapier's GitLab integration you can create new Issues directly from within Gmail, get Slack notifications for new Issues and much more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671323/Blog/Hero%20Images/zapier-gitlab-integration.jpg","https://about.gitlab.com/blog/gitlab-zapier-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"There's a Zap for that. 
Automate your workflows with GitLab + Zapier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-08-23\",\n      }",{"title":2035,"description":2036,"authors":2041,"heroImage":2037,"date":2042,"body":2043,"category":300,"tags":2044},[1133],"2017-08-23","\n\nZapier has just launched an integration with GitLab – hurrah! With a host of Zap templates, like triggering tweets for new commits, Slack messages when Merge Requests are opened or closed, or creating new Issues from starred emails in Gmail, this is great news for teams wanting to automate their workflows and collaborate more effectively.\n\n\u003C!-- more -->\n\nIf you're not a Zapier user yet, essentially it's a tool you can use to create integrations between your other tools which don't generally talk to each other, including Gmail, Slack, Twitter, Trello, Asana and now [GitLab](https://zapier.com/zapbook/gitlab/)! It's endlessly customizable, as you can create any integration you want using the \"Make a Zap\" button on the Zapier homepage. [See how to get started](https://zapier.com/zapbook/updates/1165/gitlab-integrations/?rebuild=yes).\n\nOur UX Lead [Sarrah Vesselov](/company/team/#SVesselov) gave the integration a spin and had this to say:\n\n>Overall, the number of pre-set zaps for GitLab was excellent. Trello, Asana, Gmail, Twitter integrations were easy to set up and are helpful for a number of potential workflows. There is great opportunity to use Issues and MRs to trigger automated workflows and increase collaboration and productivity for teams.\n\nCurious about those pre-set Zaps? These are our favorites:\n\n\u003Cscript src=\"https://zapier.com/zapbook/embed/widget.js?services=gitlab&container=true&limit=10\">\u003C/script>\n\nYou can [browse popular Zaps for GitLab here](https://zapier.com/zapbook/gitlab/). Missing something? 
[Create your own](https://zapier.com/app/editor/25451800/nodes/25451800/action) or tweet [@zapier](https://twitter.com/zapier) with your ideas.\n",[232,9],{"slug":2046,"featured":6,"template":699},"gitlab-zapier-integration","content:en-us:blog:gitlab-zapier-integration.yml","Gitlab Zapier Integration","en-us/blog/gitlab-zapier-integration.yml","en-us/blog/gitlab-zapier-integration",{"_path":2052,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2053,"content":2059,"config":2064,"_id":2066,"_type":13,"title":2067,"_source":15,"_file":2068,"_stem":2069,"_extension":18},"/en-us/blog/gitops-with-gitlab-using-ci-cd",{"title":2054,"description":2055,"ogTitle":2054,"ogDescription":2055,"noIndex":6,"ogImage":2056,"ogUrl":2057,"ogSiteName":685,"ogType":686,"canonicalUrls":2057,"schema":2058},"GitOps with GitLab: The CI/CD Tunnel","This is the fifth in a series of tutorials on how to do GitOps with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667236/Blog/Hero%20Images/Learn-at-GL.jpg","https://about.gitlab.com/blog/gitops-with-gitlab-using-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: The CI/CD Tunnel\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-01-07\",\n      }",{"title":2054,"description":2055,"authors":2060,"heroImage":2056,"date":2061,"body":2062,"category":832,"tags":2063},[1798],"2022-01-07","\n\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. 
You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will see how you can access a Kubernetes cluster using GitLab CI/CD and why you might want to do it even if you aim for [GitOps](/topics/gitops/).\n\n## Prerequisites\n\nThis post assumes that you have a Kubernetes cluster connected to GitLab using the GitLab Kubernetes Agent. If you don't have such a cluster, I recommend consulting the previous posts (linked above) to have a similar setup from where we will start today.\n\n## Meet the CI/CD Tunnel\n\nThe GitLab Kubernetes Agent is not just a GitOps tool that will enable pull-based deployments and be one more application to maintain beside the other 70 in your DevOps stack. The GitLab Kubernetes Agent aims to serve the GitLab vision of providing you a single application for the whole DevSecOps lifecycle. As a result, the Agent's goal is to provide an integrated experience with every relevant GitLab feature.\n\nWhat GitLab features does the Agent integrate with today?\n\n- GitLab CI/CD\n- Container network security\n- Container host security\n- Container scanning\n\nIn this post, we will focus on the GitLab CI/CD integration. Given the power and flexibility of GitLab CI/CD, the majority of our users have been using it for years successfully and, until the Agent appeared, they often had to manually script their cluster connections and deployments into it. If the previous setup sounds familiar, I recommend checking out the Agent's CI/CD integration features, the CI/CD tunnel. 
The CI/CD tunnel enables a cluster connection to be used from GitLab CI/CD, thus you need only minor adjustments to your existing setup, and will receive a GitLab supported component that we are continuously expanding to provide more and more integrations on top of it.\n\nThe CI/CD tunnel is always enabled in the project where you register and configure the Agent, and the given connection can be shared by other groups and projects, too. This way, a single connection can be reused throughout the organization to save on resource and maintenance costs.\n\nGitLab automatically injects the available Kubernetes contexts into the CI/CD runner environment's `KUBECONFIG`. As a result, you can activate a context and start using it without much setup.\n\n## How to configure the CI/CD tunnel\n\nAs already mentioned, the CI/CD tunnel is always enabled in the project where you register and configure the Agent. If you would like to use the tunnel in the same repository, no configuration is needed. If you would like to share the connection with other repositories, open your agent configuration file and add the following lines:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/project\n   groups:\n   - id: path/to/group\n```\n\nChange the placeholder paths here to your project or group path. Sharing a connection with a group enables access to all the projects within that group. Once you save the configuration file, you can turn your attention to your application project repository, and use the following job to list and select an agent:\n\n```yaml\ndeploy:\n   image:\n     name: bitnami/kubectl:latest\n     entrypoint: [\"\"]\n   script:\n   - kubectl config get-contexts \n   - kubectl config use-context path/to/agent-configuration-project:your-agent-name\n```\n\n## How to install GitLab integrated applications into your cluster\n\nAs an application of the above, let's install some applications into the cluster. 
As various GitLab features require applications in your cluster to be installed and configured for GitLab, Gitlab provides a cluster management project template to help you get started. One can easily install these GitLab integrated applications into their clusters using this template. Let's see how to use it with the CI/CD tunnel and the Agent!\n\n### Create the cluster management project\n\nFirst, let's create a new GitLab project using the \"Cluster Management Project\" template. Open the [create new project from template page](https://gitlab.com/projects/new#create_from_template), search for \"GitLab Cluster Management\", and start a new project with that template.\n\nYou will receive a project that already contains quite a lot of things! It comes with a ready-made `.gitlab-ci.yml` file and [helmfile](https://github.com/roboll/helmfile) based setup for 11 applications that integrate with various GitLab functionalities. [Each application might require different configurations](https://docs.gitlab.com/ee/user/clusters/management_project_template.html#built-in-applications). You can read about these in the linked documentation.\n\nAs part of this article, we will install NGINX Ingress and GitLab Runners using the cluster management project.\n\n### How to share the CI/CD tunnel\n\nThis newly created project needs access to one of your clusters. Let's share an Agent's connection with this project as described above. Edit your agent configuration file and add:\n\n```yaml\nci_access:\n   projects:\n   - id: path/to/your/cluster/management/project\n```\n\n### Pick the right Kubernetes context\n\nThe CI/CD tunnel is already available from within your cluster management project. We tried to make it simple to start using a cluster connection without the need to edit `.gitlab-ci.yml`. 
For simple setups, you can just set a `KUBE_CONTEXT` environment variable with the path to and name of your agent.\n\nSet an environment variable under \"Settings\" / \"CI/CD\" / \"Variables\"\n\n![KUBE_CONTEXT variable setup](https://about.gitlab.com/images/blogimages/2022-01-07-gitops-with-gitlab-using-ci-cd/KUBE_CONTEXT_setting.png)\n\n### How to install NGINX Ingress\n\nWe are ready to install any of the supported applications using this agent connection! Let's start by installing NGINX Ingress as it does not require any application-specific configuration.\n\nIn your cluster management project, edit `helmfile.yaml` and uncomment the line that points to the `ingress` application. Commit the changes and wait for GitLab magic to happen!\n\nThis was really easy!\n\n### How to install GitLab Runner\n\nAs GitLab Runner is more integrated with GitLab, it needs a little bit of configuration. [The Runner should know](https://docs.gitlab.com/ee/user/infrastructure/clusters/manage/management_project_applications/runner.html#required-variables) where it can find your GitLab instance and needs a token to authenticate with GitLab.\n\nTo make it simple for you to install a Runner fleet, you can configure these as environment variables. By default the `CI_SERVER_URL` variable is used to specify the GitLab url. You can overwrite this if needed. For the token, you should create `GITLAB_RUNNER_REGISTRATION_TOKEN` as a masked and protected environment variable with the value of your Runner registration token. Feel free to use either a project or a group registration token.\n\nFinally, as with the Ingress installation, uncomment the related line in the `helmfile.yaml`.\n\n## The full potential of the cluster management project\n\nThe cluster management project you created is yours. Thus, you are free to change it, extend it, or get rid of it. 
In this section, I would like to share with you a few ideas of how you might benefit the most from it.\n\n### Did you move away from Helm v2 already?\n\nThe `.gitlab-ci.yml` file in the cluster management project has a job that supports users to upgrade their Helm v2 installations to v3. If you never had these applications installed through a cluster management project with Helm v2, then you don't need that job. Feel free to delete it from your CI yaml.\n\n### Extend the project with your own apps\n\nThe cluster management project is self-contained as is. You can add your own helm/helmfile based application setups to it. To get started, I recommend to check out the [helmfile](https://github.com/roboll/helmfile) README.\n\n### Stay up to date\n\nWe want you to own the cluster management project, so you can upgrade the applications independently of GitLab releases. Still, you might prefer to follow GitLab releases, too, as you can expect improvements to the cluster management project template. How can you do that?\n\nIf you followed the `kpt` based Agent installation setup, you know that `kpt` can check out a git subtree and merge local changes with upstream changes when you request an update. You can use `kpt` here, too! \n\nAs you manage the cluster management project, you can replace selected applications with their `kpt` checkouts. For example, you can start following the upstream template with:\n\n```bash\ncd applicatioins\nrm -rf prometheus\nkpt pkg get https://gitlab.com/gitlab-org/project-templates/cluster-management.git/applications/prometheus prometheus\n```\n\nand update to the most recent version by running:\n\n```bash\nkpt pkg update applications/prometheus\n```\n\n## Recap\n\nAs we have seen in this article, the GitLab Kubernetes Agent provides way more possibilities than focused GitOps tools do. Besides supporting pull-based deployments, we support GitLab users with integrating into their existing CI/CD based workflows. 
Moreover, a Cluster Management Project template ships with GitLab that supplements the various GitLab integrations to simplify getting started with them.\n\n## What's next\n\nBuilding on our knowledge of the CI/CD tunnel, in the next article we will look into how to use Auto DevOps with the Agent.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n\n\n\n\n\n",[1742,722,9],{"slug":2065,"featured":6,"template":699},"gitops-with-gitlab-using-ci-cd","content:en-us:blog:gitops-with-gitlab-using-ci-cd.yml","Gitops With Gitlab Using Ci Cd","en-us/blog/gitops-with-gitlab-using-ci-cd.yml","en-us/blog/gitops-with-gitlab-using-ci-cd",{"_path":2071,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2072,"content":2077,"config":2082,"_id":2084,"_type":13,"title":2085,"_source":15,"_file":2086,"_stem":2087,"_extension":18},"/en-us/blog/gl-for-pm-prt-2",{"title":2073,"description":2074,"ogTitle":2073,"ogDescription":2074,"noIndex":6,"ogImage":1896,"ogUrl":2075,"ogSiteName":685,"ogType":686,"canonicalUrls":2075,"schema":2076},"2 Examples of how marketing uses GitLab to manage complex projects","How GitLab technology powers integrated marketing campaigns and product marketing projects.","https://about.gitlab.com/blog/gl-for-pm-prt-2","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2 Examples of how marketing uses GitLab to manage complex projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-12-11\",\n      }",{"title":2073,"description":2074,"authors":2078,"heroImage":1896,"date":2079,"body":2080,"category":832,"tags":2081},[1901],"2019-12-11","\n\n_In [part one of this series](/blog/gitlab-for-project-management-one/) we looked at the pervasive problems around collaboration and how GitLab was built to resolve those challenges both in and out of the software development space. 
In this second part we take a detailed look at how our marketing teams used GitLab for project management._\n\nWhen we jumped in to using GitLab for project management, we did it in a big way. The [Just Commit marketing campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7) which launched in January 2019 is a good example of how the marketing team uses GitLab features like issues and epics.\n\n\"It was our first integrated campaign, and if you're not familiar with what that means, it's basically landing a single message across all channels,\" says [Jackie Gragnola](/company/team/#jgragnola), marketing programs manager. “So using social media, digital marketing, all of our content, our website. and in doing so, it was involving a lot of different team members.\"\n\nSince there were so many stakeholders involved, it was unrealistic that something like a Google Doc could provide the infrastructure necessary for efficient and transparent collaboration. Jackie migrated her kick-off document from Google Docs over to GitLab. 
\"It was the first test into using epics to give the high-level information and then organize the group into a single unified vision for what this campaign would become,\" she explains.\n\n![justcommit-integratedcampaign](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/justcommit_integratedcampaign.png){: .shadow.large.center}\nThe Just Commit integrated campaign epic included the JustCommit label, as well as campaign goals, personas the campaign is targeting, links to recorded meetings, and more.\n{: .note.text-center}\n\nThe Just Commit ancestor epic also included details such as [UTM tracking links](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#utm-for-tracking-urls), a [list of teams and DRIs involved in the campaign](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#teams-involved-roles-responsibilities), and a [timeline of key dates and deliverables](https://gitlab.com/groups/gitlab-com/marketing/-/epics/7#key-timeline-dates) in the lead-up to the Feb. 18, 2019 launch.\n\nA level below the ancestor epic are child epics, which were organized by areas of action items. Some examples include organic search, webcasts, emails, and events; messaging and positioning, etc.\n\n![justcommit-child epics](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-childepics.png){: .shadow.large.center}\nExamples of some of the child epics for the Just Commit integrated campaign.\n{: .note.text-center}\n\nThe Just Commit label that was created was tagged to issues related to the campaign. It is simple enough to get a high-level overview of what issues are related to the Just Commit campaign by searching for the label.\n\nIn order to dig deeper into the different categories of work, you’d look at the issue list within the different child epics. 
The issue list functions essentially as a list of what needs to get done, and provides a good overview of what’s left to accomplish on the list.\n\n![justcommit-issue list](https://about.gitlab.com/images/blogimages/gitlab-for-proj-management2/jc-strategy-and-design.png){: .shadow.large.center}\nThis is an example of the issue list from the strategy and design child epic.\n{: .note.text-center}\n\nInside each issue is a DRI and a due date. The due dates were important not just to stay ahead of deadline, but also because there were a lot of dependencies baked into the integrated campaign.\n\n\"We couldn't work on the content until we knew what the message was, and we couldn't work on anything related to digital marketing until we had the designs approved,\" says Jackie. \"So, this just kept us organized by saying what we needed to get done by what dates and kept us up-to-date on the timeline that would help us hit that delivery date.\"\n\nBy using GitLab features such as ancestor epics, child epics, issues, and labels, the Just Commit integrated campaign kept all stakeholders updated on their progress and accountable for their deliverables.\n\n## How product marketing uses GitLab\n\n[Tye Davis](/company/team/#davistye) is a technical marketing manager and he uses GitLab for managing product marketing projects.\n\n### Use issue boards to get a global overview of work\n\nTye works primarily within the [product marketing project](https://gitlab.com/gitlab-com/marketing/product-marketing), which is housed in the broader marketing group. Just like we saw in the Just Commit integrated campaign, there are various ancestor epics, child epics, and issues housed within this project.\n\nThe [issue board view](https://docs.gitlab.com/ee/user/project/issue_board.html) is a useful way to visualize and organize all the issues and activity happening within a specific group or project. 
Viewing an issue board is simple enough: Just select boards under the issues tab to see all of the issues within a specific group, or to narrow the scope select a specific project. But building one is another matter entirely.\n\nIt is important to think strategically about the level at which you build your issue board, because that will impact how much information is rolled up into the board.\n\n\"You have to think about where your work lies and where you should be building your issue boards in epics,\" says [JJ Cordz](/company/team/#jjcordz), senior marketing ops manager. \"As an example, in marketing ops we presently work across departments so we do a lot of with sales ops, biz ops, sales in general, and all of those are individual projects and groups. So our issue board is actually built at this highest level (i.e., marketing group level) because we need to pull in everything else.\"\n\nBut not every team is as integrated as marketing ops. Sometimes building an issue board at the team level, instead of the group or project level, makes the most sense for your workflow.\n\nThe [technical marketing team has its own issue board](https://gitlab.com/gitlab-com/marketing/product-marketing/-/boards/926375?&label_name[]=tech-pmm), and it is sorted by labels. The labels it uses are uniform across the marketing group to indicate the status of a particular issue – `status: plan`, `status: WIP`, `status: scheduled`, or `status: review`. The labels automatically change when a particular issue is dragged between label lanes.\n\nThe use of these labels and the different team boards that live within the product marketing group allows anyone to take a look at the status of both individual issues and larger projects.\n\n### Team boards\n\nAnother option to configure an issue board is to base it on teams and sort based on an assignee. The team board view sorted by assignee allows you to see what each team member is working on.\n\n“We create boards based on assignee. 
This allows us to see who has what issue and what they're working on,\" says Tye. “Maybe your manager just wants to see what the team's working on or you're being a collaborative Agile team and want to just see what everyone's doing or what you could work on together.\"\n\n### Tracking progress\n\nThere are two main options for measuring work progress from a project management perspective: [milestones](https://docs.gitlab.com/ee/user/project/milestones/#project-milestones-and-group-milestones) and [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html).\n\nMilestones are time-bound and track work output based on a specific timeframe (e.g., Q1 FY20 – a four-month period). When creating an issue, you can assign it to a specific milestone.\n\nBurndown charts reflect all the issues that are completed within the specific milestone. Once the time period (e.g., Q1 FY20), is up, you move any remaining and new work over to the next milestone (e.g., Q2 FY 2020).\n\n### Relating to GitLab customers\n\nWhile the marketing team and other teams across the company use GitLab as a project management tool, the majority of our customers are engineers that use GitLab as an Agile planning tool for developing code.\n\nWe can still relate to our customers through our use of issues and merge requests to make changes to the handbook, publish blog posts, among other activities in different repositories within GitLab.\n\nWhether you’re an infrastructure engineer, product marketing manager, or even an editor for the GitLab blog, the GitLab product functions as a sophisticated and customizable project management tool where collaboration and efficiency are baked into the function and design.\n\nWatch the video from [GitLab Contribute](/events/gitlab-contribute/) in New Orleans to see an overview of how GitLab can be used for project management, plus more on using GitLab for integrated campaigns and product marketing.\n\n\u003C!-- blank line 
-->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tbg8KSyIWVg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by [Startaê Team](https://unsplash.com/@startaeteam) on [Unsplash](https://unsplash.com/s/photos/sticky-notes).\n{: .note}\n",[722,9,696],{"slug":2083,"featured":6,"template":699},"gl-for-pm-prt-2","content:en-us:blog:gl-for-pm-prt-2.yml","Gl For Pm Prt 2","en-us/blog/gl-for-pm-prt-2.yml","en-us/blog/gl-for-pm-prt-2",{"_path":2089,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2090,"content":2095,"config":2100,"_id":2102,"_type":13,"title":2103,"_source":15,"_file":2104,"_stem":2105,"_extension":18},"/en-us/blog/guide-to-ci-cd-pipelines",{"title":2091,"description":2092,"ogTitle":2091,"ogDescription":2092,"noIndex":6,"ogImage":1086,"ogUrl":2093,"ogSiteName":685,"ogType":686,"canonicalUrls":2093,"schema":2094},"A quick guide to GitLab CI/CD pipelines","How GitLab is making a better pipeline with Auto DevOps.","https://about.gitlab.com/blog/guide-to-ci-cd-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A quick guide to GitLab CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-07-12\",\n      }",{"title":2091,"description":2092,"authors":2096,"heroImage":1086,"date":2097,"body":2098,"category":718,"tags":2099},[1113],"2019-07-12","\nTo be successful with [DevOps](https://about.gitlab.com/topics/devops/), teams must use [automation](https://docs.gitlab.com/ee/topics/autodevops/), and [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/) are a big part of that journey. At its most basic level, a pipeline gets code from point A to point B. 
The quicker and more efficient the pipeline is, the better it will accomplish this task.\n## What is a CICD pipeline?\n\nA pipeline is the lead component of continuous integration, delivery, and deployment. It drives software development through building, testing and deploying code in stages. Pipelines are comprised of jobs, which define what will be done, such as compiling or testing code, as well as stages that spell out when to run the jobs. An example would be running tests after stages that compile the code.\n\nA CI/CD pipeline automates steps in the SDLC such as builds, tests, and deployments. When a team takes advantage of automated pipelines, they simplify the handoff process and decrease the chance of human error, creating faster iterations and better quality code. Everyone can see where code is in the process and identify problems long before they make it to production.\n\nBefore we dive in, let's cover some basics:\n\n## The GitLab pipeline glossary\n\n**Commit**: A code change.\n\n**Job**: Instructions that a runner has to execute.\n\n**Pipeline**: A collection of jobs split into different stages.\n\n**Runner**: An agent or server that executes each job individually that can spin up or down as needed.\n\n**Stages**: A keyword that defines certain stages of a job, such as `build` and `deploy`. Jobs of the same stage are executed in parallel.\nPipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, within the root of a project. From there, you can set up parameters of your pipeline:\n\n*   What to execute using [GitLab Runner](https://docs.gitlab.com/ee/ci/runners/#configuring-gitlab-runners)\n*   What happens when a process succeeds or fails\n\nNot all jobs are so simple. 
For larger products that require cross-project interdependencies, such as those adopting a [microservices architecture](/blog/strategies-microservices-architecture/), there are [multi-project pipelines](/blog/use-multiproject-pipelines-with-gitlab-cicd/).\n\n![multi-project pipelines](https://about.gitlab.com/images/topics/multi-project_pipelines.png){: .shadow.medium.center }\n\nIn GitLab 9.3 we made it possible to display links for upstream and downstream projects directly on the pipeline graph, so developers can check the overall status of the entire chain in a single view. Pipelines continue to evolve, and in our [CI/CD product vision](https://about.gitlab.com/direction/ops/) we’re looking into making pipelines even more cohesive by implementing [Multiple Pipelines in a single `.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab-ce/issues/22972) in the future.\n\n## Pipeline as code\n\nDefining deployment pipelines through source code such as Git, is known as pipeline as a code. The pipeline as code practice is part of a larger “as code” movement that includes infrastructure as code. Teams can configure builds, tests, and deployment in code that is trackable and stored in a centralized source repository. They can use a declarative YAML approach or a vendor-specific programming language, such as Jenkins and Groovy, but the premise remains the same.\n\nA pipeline as code file specifies the stages, jobs, and actions for a pipeline to perform. Because the file is versioned, changes in pipeline code can be tested in branches with the corresponding application release.\n\nThe pipeline as code model of creating continuous integration pipelines is an industry best practice. There are multiple benefits, such as the ability to store CI pipelines and application code in the same repository. 
Developers can also make changes without additional permissions, working with tools they’re already using.\n\nOther benefits are more efficient collaboration and the ability to keep information accessible so team members can act on their decisions. Pipeline changes are subject to a code review process, avoiding any break in the pipeline migration.\n\nDeployment pipelines are in a version control system independent of continuous integration tools. Pipelines can be restored if the continuous integration system goes down. If a team wants to switch CI tools at another point, pipelines can be moved into a new system.\n\nIn the early iterations of [CI/CD](/topics/ci-cd/), DevOps tools set up pipelines as point-and-click or through a GUI. This originally presented a number of challenges:\n\n*   Auditing was limited to what was already built in\n*   Unable to collaborate\n*   Difficulty troubleshooting\n\nSomething as simple as rolling back to the last known config was an exercise in futility. CI/CD pipelines during this time were prone to breaking, lacked visibility, and were difficult to change.\n\nThe pipeline as code model corrected a lot of these pain points and offered the flexibility teams needed to execute efficiently. With source code, teams could use Git to search and introspect changes.\n\nToday, many tools have adopted YAML configuration as a best practice. GitLab CI/CD has used code, rather than GUI, since the beginning for pipeline configuration. 
\"Pipeline as code\" comes with many of the same benefits the other \"as code\" trends have:\n\n*   **Version control** – keep track of changes over time and revert to previous configurations easily\n*   **Audit trails** – know when and what changes were made to the source code\n*   **Ease of collaboration** – code is available to the team for improvements, suggestions, and updates\n*   **Knowledge sharing** – import templates and code snippets so teams can share best practices\n*   **Built-in Lint tool** – ensures YAML file is valid and assists new users\n\nThe principles of software development apply not only to the applications we deliver but also to _how_ we build them. The pipeline as code model creates automated processes that help developers build applications better and faster. Having everything documented in a source repository allows for greater visibility and collaboration so that everyone can continually improve processes, which is what DevOps is all about.\n\n## What are the different stages of a GitLab CI/CD pipeline?\n\nPipelines are comprised of jobs, which define _what_ to do, such as compiling or testing code; stages, which define _when_ to run the jobs; and runners, which are agents or servers that execute each job, and can spin up or down as needed.\n\nPipelines are generally executed automatically and don’t need any intervention once they are created. \n\nA typical pipeline generally consists of a few stages in the following order:\n\n### Test\nThe test stage is where the code is assess to ensure there are no bugs and it is working the way it was designed to before it reaches end users. The test stage has a job called deploy-to stage. Unit testing on small, discrete functions of the source may also done. All unit tests running against a code base are required to pass. 
If they don’t that creates a risk that must be addressed right away.\n\n### Deploy\nThe staging stage has a job called deploy-to-stage, where a team can conduct further tests and validation. It is followed by a production stage with a job called deploy-to-production. If the code passes a series of automated tests, often the build will automatically deploy. [The endpoint is typically pre-production deployment](https://www.techtarget.com/searchsoftwarequality/CI-CD-pipelines-explained-Everything-you-need-to-know). Once the build’s integrity is completely validated by stakeholders, it can be deployed to an actual production environment. Once the build passes pre-deployment testing, in a continuous deployment pipeline, it is automatically deployed to production.Then, it is monitored. To do so effectively requires collecting and [analyzing metrics](https://about.gitlab.com/topics/ci-cd/continuous-integration-metrics/) such as deployment frequency, deployment time and lead time for changes.\n\n## How do I set up a GitLab CI/CD pipeline?\nPipeline templates are useful because writing them from scratch is a time-consuming and onerous process. GitLab has pipeline templates for more than 30 popular programming languages and frameworks. Templates to help you get started can be found in our [CI template repository](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates).\n\nA GitLab pipeline executes several jobs, stage by stage, with the help of automated code.\n\nA continuous integration pipeline involves building something from the scratch and testing the same in a development environment. It might occur to the developers to add something after building the application and pushing it into production. 
This can be done with the help of continuous integration where we can add the code even after it is deployed.\n\nThis phase includes testing as well where we can test with different approaches in the code.\n\n### CD Pipeline prerequisites \nTo get started, you need to set up an [Ubuntu 18.04 server](https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04) along with a sudo non-root user and firewall. You also need at least 1 GB RAM and 1 CPU.\n\n[Docker](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-18-04) must be installed on the server.\nA user account on a GitLab instance with an enabled container registry. The free plan of the [official GitLab instance](https://gitlab.com/) meets the requirements. You can also host your own GitLab instance by following the [How To Install and Configure GitLab on Ubuntu 18.04 guide](https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-gitlab-on-ubuntu-18-04).\nThen you should create a GitLab project, adding an HTML file to it. Later, you’ll copy the HTML file into an Nginx Docker image, which in turn, you will deploy to the server.\n\n1. Log in to your GitLab instance and click new project.\n2. Give it a proper Project name.\n3. Optionally add a Project description.\n4. Make sure to set the Visibility Level to Private or Public depending on your requirements.\n5. Finally click Create project\n\n## Building better pipelines with Auto DevOps\n\nCI/CD pipelines have automated so much of the development process, however, it will still take time to do the initial work of building and configuring them in your environment. But what if you aren’t sure what all the parts of your CI/CD pipeline should be? 
What are the best practices you should know at every stage?\n\nIn the past, there have only been two choices: Time-consuming configuration from scratch with complete customization, or an easier auto-configuration with much less flexibility. Developers have longed for the moment where they could click a button and have a complete pipeline with code quality, language detection, and all scripts included with very little manual work.\n\n[Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) is our solution to this problem. It is a pre-built, fully-featured CI/CD pipeline that automates the entire delivery process. Instead of having to choose between time and flexibility, GitLab offers both. In addition to the Auto DevOps template, GitLab offers several CI templates that can be modified as necessary, or you can override specific settings. Want all the power of Auto DevOps for a custom test job? Just override the `script` block for the `test` job and give it a try. Since templates are also modular, teams have the option to pull in only the parts they need.\n\nWe hope this blog post gives you some insight into how we approach pipeline as code and our larger vision for how we’re improving the CI/CD pipeline experience in the future. 
Automated pipelines increase development speed and improve code quality, and we’re actively working on making them even better and easier to use.\n\nCover image by [Gerrie van der Walt](https://unsplash.com/photos/m3TYLFI_mDo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/pipes?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[722,9,108],{"slug":2101,"featured":90,"template":699},"guide-to-ci-cd-pipelines","content:en-us:blog:guide-to-ci-cd-pipelines.yml","Guide To Ci Cd Pipelines","en-us/blog/guide-to-ci-cd-pipelines.yml","en-us/blog/guide-to-ci-cd-pipelines",{"_path":2107,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2108,"content":2114,"config":2121,"_id":2123,"_type":13,"title":2124,"_source":15,"_file":2125,"_stem":2126,"_extension":18},"/en-us/blog/high-efficiency-innovation",{"title":2109,"description":2110,"ogTitle":2109,"ogDescription":2110,"noIndex":6,"ogImage":2111,"ogUrl":2112,"ogSiteName":685,"ogType":686,"canonicalUrls":2112,"schema":2113},"3 lessons for innovation and rapid execution from GitLab","Guest author Jay Newman recently shadowed our CEO to discover how we move so quickly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680169/Blog/Hero%20Images/high-efficiency-innovation.jpg","https://about.gitlab.com/blog/high-efficiency-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"High-efficiency innovation: 3 lessons to learn from GitLab's culture of rapid execution\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jay Newman\"}],\n        \"datePublished\": \"2018-03-27\",\n      }",{"title":2115,"description":2110,"authors":2116,"heroImage":2111,"date":2118,"body":2119,"category":811,"tags":2120},"High-efficiency innovation: 3 lessons to learn from GitLab's culture of rapid execution",[2117],"Jay 
Newman","2018-03-27","\n\nAll companies have different ways of creating new products and services. Despite that, there are a few patterns that show up consistently. At [Jump](http://www.jumpassociates.com), we like to call those patterns the different \"cultures\" of innovation. One such pattern has to do with execution. Great executors (like GE and FedEx) are masters of sharp focus and efficient machine-making.\n\nMany of the Fortune 500 companies that we work with do their best innovation this way. They've built infrastructure that excels at launching products globally, coordinating thousands of employees and operating at massive scale. These companies often ask us what they can learn from what's going on in Silicon Valley. There's much to learn, of course, from the startups and entrepreneurial ecosystem here.\n\nThe important question is not \"How do they do things in Silicon Valley?\" Instead, it's \"What can I learn that would work well in my organization?\" It's always exciting to come across a startup that's doing what these big companies do best – execute at scale – and doing it in a completely different way.\n\nGitLab is one such company. They're an open source software company powering many of the world's largest corporations. They've developed a surprising – and strong – culture of innovation. They're a remote-only company. There's no physical headquarters or office space for their 200+ employees located worldwide. They proudly admit that they value \"boring solutions.\" Their [entire business strategy is available](/company/strategy/) for the public and their competitors to see. They're respected for their [product](/blog/gitlab-leader-continuous-integration-forrester-wave/), their [culture](/company/culture/), and their [results](http://www.businessinsider.com/gitlab-raises-20-million-from-gv-2017-10).\n\nMany companies pride themselves on their ability to iterate quickly and answer yes/no decisions rapidly. 
Even they might be surprised at the scope and scale of GitLab's efficiency. GitLab drives high-efficiency innovation through a culture of rapid execution. They weave speed directly into the fabric of who they are and what they do. Do you want to learn how they do it? I recently shadowed GitLab's CEO, [Sid Sijbrandij](/company/team/#sytses), and his team for a day.\n\nHere's how they make it happen.\n\n## When the answer is clear, build for speed. Speed wins.\n\n*Why build a culture of rapid execution?*\n\nWith such a unique team culture and set of business practices, the first thing I wanted to learn from Sid was why GitLab operates the way it does. What became clear was that it's all very intentional.\n\nA few key beliefs are central to the decisions they've made:\n\n### Belief 1: The solution required to win is already super clear to everyone.\n\nThey're operating in a market called DevOps, which is about creating platforms and tools for software developers to use in their work. It's a market where both the unmet customer need and the ideal solution are clear to everyone.\n\nThey were newer to the game than some brand name and legacy competitors, so they chose to prioritize speed over invention to get to the finish line first.\n\n### Belief 2: If you don't do anything new, you can do things faster, bigger and better.\n\nThe folks at GitLab believe that it's better to be boring. They value \"boring solutions.\" It's not because boring is better in and of itself. It's because boring is efficient. It's faster. And faster can become bigger. And when you add in collaboration with a global open source community, bigger can become better.\n\nIf there's a market standard, they don't try to create something different. They get on board. As Sid says, \"It's about convention over conviction. We make sure everyone [in the open source community] is enticed to participate. 
If the rest of the world is doing it in some way, we should be doing it in that way.\"\n\n### Belief 3: It's OK not to make everyone happy.\n\nIt's hard for most companies – and most people – to change to what made them successful in the first place. For GitLab, making those kinds of changes is critical to achieving the growth they seek. So on a daily basis, they choose to act quickly, make mistakes quickly, and learn from those mistakes quickly.\n\nThat can lead to decisions – big and small – that might not make everyone happy.\n\nWhen they launch a completely new version of GitLab (they're on version [10.6](/releases/2018/03/22/gitlab-10-6-released/) right now), they always add some things that will frustrate some existing customers, and they often take away things that other customers love.\n\n\"There's way more people not using GitLab than that are. So we should always optimize for those future customers, not your current ones. That's why companies slow down. They start listening. Engineers want to fix the current bugs. Sales wants to keep the old deck that works for them. You start listening to your customers and what they need you to maintain or fix. The natural motion of any company is to slow down. So as CEO you need to get the company beyond that.\"\n\nSo what does high-efficiency innovation and rapid execution look like at GitLab?\n\nHere are a few examples of the pace at which they operate:\n\n1. They release a new version of GitLab every single month.\n1. Everything is in draft and subject to change. It's always under construction.\n1. They don't repeat themselves. GitLab documents how it does things in a [handbook](https://handbook.gitlab.com/handbook/). It's 1,000 pages long. If it's in the handbook, don't repeat it.\n1. Every conference call starts on time. No wasted minutes. Sid checks 15-30 action items off the list in each of his 25-minute 1-on-1 meetings.\n1. They trust their team to multi-task appropriately. 
If you want to check email during a meeting, it's probably more important than the meeting is to you.\n\nThere's a final, often-overlooked value of speed: it's exciting. Workplaces that manage to pair speed with evident progress allow their teams to feel accomplished, motivated, and on the edge of their seats. It's an easy hack for maintaining employee engagement.\n\n## Don't sacrifice long-term vision for short-term speed. Be accountable for both.\n\n*What is GitLab is rapidly executing on?*\n\nMany companies who prize execution do a great job at sustaining and growing their existing products. They're often quite efficient – though they could learn something from the speed at which GitLab operates. But they're more likely to struggle with thinking far out into the future.\n\nTo paraphrase Stephen Covey, there's a big difference between efficiency and effectiveness. A jet flying 1,000 miles per hour is efficient; a jet flying 1,000 miles per hour in the right direction is effective.\n\n#### So if GitLab as an organization is a jet built for speed – where is it going?\n\nSid wants GitLab to help multiply the potential for progress that humanity can drive into the world. \"Our mission is 'Everyone can contribute.' That's a long-term vision. That's 10 years. It means changing all of our culture to read-write. Think Wikipedia. They allow everyone to contribute. Imagine if we can do that. You release a lot of progress. You 10x the progress. [Multipliers like that are] thrown around so easily in Silicon Valley that you have to be cautious. But if you look at 100,000 companies using GitLab, and really being able to get their out software faster. I'm willing to stand behind that.\"\n\nThat means that not only is GitLab thinking about efficiency and effectiveness, but it's also thinking about impact. Impact on the scale of human progress and global culture.\n\nThat's pretty big and pretty far out. 
So how do they make sure the pilots keep looking way out there on the horizon while flying at supersonic speeds and maneuvring around today's obstacles?\n\nFirst, you set the mission and vision. Everything starts with that mission in mind. Everyone knows it, and Sid talks about it [every chance he gets](https://blog.ycombinator.com/gitlab-distributed-startup/).\n\nNext, you draw that vision back into today's actions with cascading plans. Create a three-to-five-year strategy about how to get there. Craft a yearly plan and [product vision](/blog/gitlabs-2018-product-vision/) – one that's concrete enough that you could show screenshots of what it will look like a year from now. Define quarterly goals (GitLab's [OKRs](/company/okrs/) are public), monthly targets, and smaller sprints to get you there.\n\nThird, you make each of these regular goals highly ambitious, close-in, unambiguous, and concrete. \"Setting high goals pushes people beyond their comfort zone,\" Sid told me. At Y Combinator, he says they taught GitLab that \"20 percent is the new 10 percent.\" That's 20 percent growth, every single week. It's a high number, and it forces them to make completely different types of decisions.\n\nFinally, because the short-term goals are incredibly high, you focus on iteration. [Iteration](https://handbook.gitlab.com/handbook/values/#iteration) is one of GitLab's core values. They define it clearly: \"We do the smallest possible thing and get it out as quickly as possible.\" And they don't just ask developers and designers to work this way. \"We put the whole company on that diet. It made sense for the product. But for marketing, sales, etc., we've gotten them there. If you say 'Grow XYZ in the next two weeks,' you do completely different things. 
I don't know why that is, but you do.\"\n\n### Encode culture and values to keep the company moving faster.\n\n*How does GitLab do what they do?*\n\nIt was GitLab's strong culture and values orientation that first drew me to them as an organization. I'm often on the lookout for how leaders drive values through their organizations – from Jon Stewart on \"The Daily Show\" to the frontline teams at Starbucks and Zappos.\n\nThe best values-oriented organizations draw explicit links between their values, their competitive advantages, and their daily activities.\n\nHere's where GitLab stands out.\n\nIn just one day of shadowing GitLab's staff, the team talked about values during a product meeting, two interviews with prospective employees, an analyst call and a 1-on-1 with a teammate. The whole team is drawing causal links between what it does (its business activities) and how it does them (the values they live by).\n\n>The whole team is drawing causal links between what it does (its business activities) and how it does them (the values they live by).\n\nSo how does that work? It requires leaders choosing to identify not just the values that matter, but also how to organize around them. Sid told us \"I didn't do a very good job coding GitLab [when he and his co-founders all started back in 2011]. But I think I'm doing a good job coding GitLab the company.\"\n\nAs a remote-only company, \"coding the company\" means (1) writing things down, (2) referencing back to what's been written and (3) reinforcing it through rewards.\n\nAll of this \"GitLab the company\" code is captured in its handbook. The handbook is referenced in almost every conversation. The handbook consists of over 1,000 pages of text. It's a tool that GitLab uses to capture and detail out decisions that have already been made about all of its core business practices – marketing, sales, product, team operations, finance, and more. 
It's a constant practice for Sid and the team to reference the handbook in meetings, and to send people to look there first before continuing the conversation.\n\nThe values take a prime place in the handbook. There, values are defined, not just described. Words can mean different things in different contexts – and these values indicate a particular thing at GitLab. The definitions are brought to life with 5-15 concrete actions that employees often take for each of the six values. As Sid says, \"The culture got stronger because it is written down. And because it improves and is edited over time.\" And then they're reinforced every day through hiring, coaching, performance reviews and casual conversations.\n\nIt's rare that companies think about linking their values with their competitive advantage. It's rarer still that a company brings its values to life through the day-to-day work. What GitLab has unlocked with its values orientation is not just good and meaningful work. It has also opened the most important competitive advantage in its business model – speed.\n\n>It's rare that companies think about linking their values with their competitive advantage. It's rarer still that a company brings its values to life through the day-to-day work.\n\nIt says it right there in the 'Why have values' section of the handbook: \"Values are a framework for distributed decision-making; they allow you to determine what to do without asking your manager.\" By encoding values deep into everyday activities of the company, everyone on GitLab's team can make decisions faster.\n\nIn DevOps, winning is about getting there first. GitLab coded values right into its organizational design to make sure it could always be the fastest to market.\n\n## Parting thoughts: Will high-efficiency innovation work for you?\n\nAlthough they weren't thinking about large corporations, the oracles of Delphi were right. 
The most important maxim is to \"know thyself.\" The GitLab prescription isn't right for every company. What's most important is to build a culture of innovation that reflects your strengths and your values.\n\nGitLab is a company of executors, of coders and of people who aren't afraid to work out in the open and make mistakes. They see clear problems. Then they attack. GitLab built a method of innovation that works well for them, but it's not a one-size-fits-all approach. It won't work for everyone, but it might work for you.\n\n#### Here are the questions you should ask:\n\n1. Is the problem you're facing clear to you and your competitors?\n1. Would the people on your team prioritize efficiency over novelty if it'll get you there first?\n1. Do you know how to make trade-offs between what works for your existing customers and what might work better for future customers?\n\nIf you answered yes, pay close attention to what GitLab is doing. Their unrelentingly quick iterative process might be just what the doctor ordered to scale your innovation.\n\nIf not, the GitLab system isn't the right fit for you. You'll want to organize your innovation in a different way.\n\nAs one example, we built Jump to handle an entirely different type of [highly ambiguous problems](https://www.forbes.com/sites/brucerogers/2018/01/25/innovation-leaders-dev-patnaik-co-founder-and-ceo-jump-associates/3/#42518f211238). So it makes sense that some of Jump's values (Passion, Curiosity, Enthusiasm, Intention, Acuity, Initiative and Play) look very much the opposite of GitLab's values (Collaboration, Results, Efficiency, Diversity, Iteration and Transparency).\n\nJump and GitLab are both deeply values-oriented companies with rich and collaborative cultures focused on innovation. 
And yet we value different things, have different org structures, hire different types of people and work on very different types of problems.\n\nSo what if you're like me and your company's approach or market situation is quite different than GitLab's? Take this as an opportunity to learn from seeing your mirror image.\n\nFirst, test parts of their approach. See what works for you and your team. Then, consider the polar opposites. Find the points where you value distinctly different things, and ask why. Learn why their method works for them, and why it wouldn't work for you. Then flip the script – what's an approach to innovation that GitLab would never do that would be a difference maker for you if you did it?\n\nEither way, take note of what GitLab is doing and how they're doing it. It's amazing, effective, growing like crazy and a great place to work. And ask yourself – should my team be innovating like that?\n\n## About the guest author\n\nJay Newman is Director of Strategy at Jump Associates, a leading strategy and innovation firm. 
Learn more at [jumpassociates.com](http://www.jumpassociates.com) and connect directly with Jay on [LinkedIn](https://www.linkedin.com/in/jaynewman1).\n\nPhoto by [Karsten Würth](https://unsplash.com/photos/ZKWgoRUYuMk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[790,696,9],{"slug":2122,"featured":6,"template":699},"high-efficiency-innovation","content:en-us:blog:high-efficiency-innovation.yml","High Efficiency Innovation","en-us/blog/high-efficiency-innovation.yml","en-us/blog/high-efficiency-innovation",{"_path":2128,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2129,"content":2135,"config":2142,"_id":2144,"_type":13,"title":2145,"_source":15,"_file":2146,"_stem":2147,"_extension":18},"/en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"title":2130,"description":2131,"ogTitle":2130,"ogDescription":2131,"noIndex":6,"ogImage":2132,"ogUrl":2133,"ogSiteName":685,"ogType":686,"canonicalUrls":2133,"schema":2134},"How DevOps and GitLab CI/CD enhance a frontend workflow","The GitLab frontend team uses DevOps and CI/CD to ensure code consistency, fast delivery, and simple automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679026/Blog/Hero%20Images/frontendworkflow.jpg","https://about.gitlab.com/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How DevOps and GitLab CI/CD enhance a frontend workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"José Iván Vargas\"}],\n        \"datePublished\": \"2018-08-09\",\n      }",{"title":2130,"description":2131,"authors":2136,"heroImage":2132,"date":2138,"body":2139,"category":832,"tags":2140},[2137],"José Iván Vargas","2018-08-09","\nIt might seem like a lot of what we do on frontend is to make our lives easier,\nbut what I’ve learned in the past two 
years as a GitLab team-member and a community contributor\nis that if we make our lives easier, we can make a lot of customers happier, too.\nOver the years, I’ve experienced many changes at GitLab, from a change in processes\nto an increase in team members. From an early stage, the frontend team has been\ncommitted to continuous improvements, but working in a rapidly growing team\nrequired an investment in the way we work.\n\nWhen I joined GitLab we still used some of the default conventions that the [Rails\nframework](/blog/upgrade-to-rails5/) recommended for the frontend, and it helped us for quite a while, but\nthe more code we touched, the more code we needed to test and build for\nperformance, making it more challenging for us to maintain. The frontend team\nrealized that we needed a way to facilitate code consistency, fast delivery, and\nsimple automation, so we decided to incorporate [DevOps](/topics/devops/) and\n[CI/CD](/solutions/continuous-integration/) into our workflow.\n\n## Frontend DevOps and CI/CD workflow\n\nWe used CI in a few scenarios, including using linters to help write a consistent\nstyle of code throughout GitLab, but in the case of our JavaScript code, we\nrealized that building for performance and maintainability was becoming\nincreasingly difficult. So, we moved away from the\n[asset pipeline and utilized webpack](/blog/vue-big-plan/),\nwhich has given us a series of benefits. For example,  when we develop locally,\ndebugging code is now a breeze, and the jobs that are frontend related run on\nproduction-bundled code, ensuring a testing environment that closely resembles\nthat of a user.\n\nAfter CI, we publish code using DevOps by hosting it with\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/)). 
We’ve seen several projects benefit from\nadopting a DevOps model, including\n[GitLab SVG libraries](https://gitlab.com/gitlab-org/gitlab-svgs) and\n[Trello Power-Up](https://docs.gitlab.com/ee/integration/trello_power_up.html).\n\nWhen we created GitLab SVG libraries, we wanted to use them for ourselves and\nmake them available to the general public, so whenever we publish a new version,\nwe use GitLab Pages so that it’s fully automated every time.\n\nWith the Trello Power-Up plugin, we use DevOps to address compatibility\nissues when a new version of Trello is released. GitLab Pages makes it easy to\ndeploy a new version, in a fast and diligent manner, so that it’s accessible in\nthe Trello Marketplace as quickly as possible.\n\n## Frontend DevOps and Data-driven efforts\n\nIncorporating frontend DevOps and CI/CD into the workflow has had a significant\nimpact on efficiency and results. We have greater insight into our operations\nand have metrics to help us detect major areas of improvement. We set up\n[Sitespeed](https://www.sitespeed.io) using Kubernetes to analyze sets of pages\nand provide reports on anything that could hamper our users’ perceived\nperformance, from CSS and JavaScript bundle sizes to accessibility issues and\nthe render time differences between various points in time. The information we gathered using\nSitespeed has helped us improve the merge requests page and identify pages that\nrender slowly. Having more data has changed the way we approach problems at\nGitLab, because we are able to focus our efforts on specific areas.\n\n## The unexpected discovery of problems\n\nOne of the unexpected benefits of our workflow is the discovery of problems that\nwe may not have identified.\n\n### A lack of automation\n\nWe realized, for example, that we lack some automation in our tools. 
For\ninstance, every time we didn’t format code in a specific way, our linter\nnotified us, but analyzing and fixing the code slowed down developer velocity,\nso we decided to add [Prettier](https://prettier.io/) to format our code in our\nmerge requests for us. We also realized that, sometimes, we need a little bit of\nautomation when we publish code. As an all-remote company, many of us work on\npublic WiFi, and we found that unreliable connections could have detrimental\neffects while deploying code. The combination of CI and DevOps made deployments\neasier. If we triggered a pipeline and a coffee shop WiFi goes vamoose, it\ndoesn't matter. We already automated a significant part of our development\nprocess, but we’re always striving for more.\n\n### A lack of speed\n\nIn the case of CI, we noticed that our own tools can be a source of problems. We\nfound that we didn’t make the necessary considerations to keep our test suite fast.\nAs developers, we want to go back to developing as fast as possible. A few of my\nteammates discovered that our test runs were becoming slower and slower with each\nrelease. Even though these are not customer-facing changes, it has made both\nproduct managers and team managers consider investing in those issues, because\nthe easier the development cycle is for the developers involved, the better it\nis for our customers, since we can deliver even more features. Furthermore, we\ncan prevent regressions from happening by having solid foundations, such as\ntesting, code style, and code formatting.\n\nEvery time we discover problems that affect us or our work, we realize that we\ncan also jeopardize the features and experiences we want to deliver to our\ncustomers. 
It has changed the culture inside the team, because we view\nperformance issues as developers rather than as GitLab team-members.\n\n## Advice to frontend teams\n\nUsing DevOps and CI/CD in a frontend workflow is compatible with teams of any\nsize, including small teams that may want to ensure that their code styling is\nthe same.\n\n### Put a linter in place\n\nWith CI, the smallest and perhaps one of the most significant steps is\nto put a linter in place, and if the pipeline doesn't pass, you can’t merge the\ncode. That's such a simple, effective way to improve your code and to keep it\ntidy and clean in the long run. Just setting up some simple steps using CI will\nimprove your team’s code and your developers’ quality of life so that they don't\nhave to worry about combing through past code. Even though small teams might not\nfind the value in the short term, when they scale, they certainly will.\n\n### Create consistent scenarios\n\nThe bigger the project, the more you realize that some of your tooling ends up\nrunning locally, and it's beneficial to run it on CI. If something doesn't work\non a generic type of machine that has enough dependencies installed to run your\nCI setup, that means there’s something wrong and that you should probably fix it\nbefore merging your code. As long as you can create a consistent scenario in which\nyou can do things like testing and linting, you should be in a good position to\ndeliver a great product.\n\n### Select CI-compatible tools\n\nFor teams of all sizes, it’s important that the tools you select as part of your\nworkflow are compatible with CI in some way, so that even if you had a big part\nof your workflow running locally, you can easily move to CI by creating a pipeline\nthat resembles that of your daily workflow. Regardless of the tool that you choose,\ncreating a job for it will return a lot of value in the long run. 
If it makes\nsense, I encourage you to add it, because there’s very little incentive not to.\nCI-compatible tools include tests runners, linters, Prettier, or any custom-made\ntools that help you in some way. One decision you might want to avoid is creating\non servers that live on CI runners. Since they only run for a limited amount of\ntime, these servers will stop existing. You could also add deployments to your\nCI workflow, helping you with DevOps and preventing you from worrying about\ncomplicated local setups for new developers. The possibilities are huge.\n\n### Add performance testing\n\nTo add to the pool of possibilities, why not add performance testing to your\nmerge requests with a tool such as\n[Lighthouse](https://developers.google.com/web/tools/lighthouse/), which can\nhelp you understand potential performance bottlenecks in your website. Or, maybe\nyour team can add the ability to generate code documentation and publish it via\nGitLab Pages. CI/CD can be a really good tool, because it will return something\nimmediately. It's just a matter of how you want to use it, depending on your needs.\n\nThe more the frontend team uses CI and DevOps, the more we discover ways to use\nit, so it’s worth it to us to invest in this tool.\n\nSometimes, we just want to\nget stuff out there without too much consideration for tooling and CI and CD,\nbut because of the benefits we’ve experienced, we now include CI/CD in all of\nour projects. With GitLab, everything is integrated, so why skip it? 
Instead of\nfighting against automation, I encourage teams to embrace the idea that CI is\nthere to help you.\n\n[Cover image](https://unsplash.com/photos/UbGqwmzQqZM) by\n[Zhipeng Ya](https://unsplash.com/photos/UbGqwmzQqZM?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText), licensed\nunder [CC X](https://unsplash.com/license).\n{: .note}\n",[2141,9,2025,722],"frontend",{"slug":2143,"featured":6,"template":699},"how-devops-and-gitlab-cicd-enhance-a-frontend-workflow","content:en-us:blog:how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","How Devops And Gitlab Cicd Enhance A Frontend Workflow","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow.yml","en-us/blog/how-devops-and-gitlab-cicd-enhance-a-frontend-workflow",{"_path":2149,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2150,"content":2156,"config":2162,"_id":2164,"_type":13,"title":2165,"_source":15,"_file":2166,"_stem":2167,"_extension":18},"/en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"title":2151,"description":2152,"ogTitle":2151,"ogDescription":2152,"noIndex":6,"ogImage":2153,"ogUrl":2154,"ogSiteName":685,"ogType":686,"canonicalUrls":2154,"schema":2155},"How do we handle engineering-led issues that don't belong to one team?","A recent issue sparked a lively discussion between engineering and product leadership about how 'cross-vertical' issues should be prioritized to avoid the bystander effect.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678916/Blog/Hero%20Images/how-do-we-handle-engineering-led-initiatives.jpg","https://about.gitlab.com/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How do we handle engineering-led issues that don't belong to one team?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von 
Hoffmann\"}],\n        \"datePublished\": \"2018-10-30\",\n      }",{"title":2151,"description":2152,"authors":2157,"heroImage":2153,"date":2159,"body":2160,"category":300,"tags":2161},[2158],"Emily von Hoffmann","2018-10-30","\nThe GitLab engineering team is split according to [product category](https://handbook.gitlab.com/handbook/product/categories/), so that team members in each category can [focus, specialize, and collaborate](/blog/configure-post/) on the same issues at the same time. They are semi-siloed by design, so what happens to issues, like tech debt, that are everyone and no one’s responsibility?\n\nThe short answer is, teams are still figuring it out. A recent [issue](https://gitlab.com/gitlab-org/gitlab-ce/issues/52150) sparked a lively discussion and video call, which you can watch below. Listen in below on the discussion between engineering and product leadership about how technical debt or other engineering initiatives that are \"cross-vertical\" (that is, touch on many different product areas) should be prioritized given that there isn't one clear point of contact or responsibility for those issues.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/3ZEI4W_Cb2g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### The gist\n\nThe issue that started it all had to do with a task that would have been assigned to the former Platform team, which used to be a catch-all that has since been split up into Create and Manage. Engineering Manager, Create [Douwe Maan](/company/team/#DouweM) explains, “With all backend teams now focused on specific product areas... there is no team to take on these kinds of backend-wide, non-product-area specific issues anymore.”\n\nHe continues, “Issues like this affect all backend teams equally, so we fall prey to the bystander effect. 
When an engineering manager gets to make room in a given release for an engineering-led initiative, they have the choice between issues like this, that any team could pick up, and product area-specific issues, that aren't going to get done unless their team does it, so the latter will have a far higher chance of being picked. Everyone cares about these kinds of issues, which means no one cares... there are many issues (technical debt and otherwise) that aren't currently anyone's responsibility, so they won't get done.”\n\nThis felt like a recurring problem due to other recent examples of cross-vertical initiatives stalling, like this issue to [switch to Rails 5 in production](https://gitlab.com/gitlab-org/gitlab-ce/issues/48991), and this issue to [update GitLab's referrer policy](https://gitlab.com/gitlab-org/gitlab-ce/issues/39147).\n\n### The research\n\nWe've heard from our community that this is a common problem, especially when working with others in different functions. In [recent interviews](https://drive.google.com/file/d/1A5mSNoPJydjcWKE4rdO2287sjnABxGDA/view) with 15 DevOps engineers, many expressed their frustration at the amount of reactive work and rework that they face, and identified a lack of successful coordination and empathy between different teams as the culprit. One interviewee said he thought this is inherent to working with some functions. 
Because of how release schedules work for developers and security engineers, he thinks these groups are the least likely to feel they are able to assign cycles to some proactive tasks, like fixing technical debt before it's critical.\n\nThe nearly 20 [software engineers](https://drive.google.com/file/d/1EVrjVcgIBbuNf4Gwenajsiy6Wv9HsTJw/view) we [interviewed](https://drive.google.com/file/d/15GksPiH0xmy4nRhylhMDIWmuvdHMWof4/view) also brought up their frustration at the way that technical debt can transform a seemingly simple task into a massive effort requiring them to rewrite or refactor a large chunk of code. More than the time spent on these tasks, several developers mentioned their concern that others might see them as dragging their feet and becoming a blocker when they take the time to resolve the technical debt. After all, it was just \"a simple task.\"\n\nThe responsibility to fix these issues becomes even more muddied when no particular team owns them. One [study of 95 teams in 25 leading corporations found that the majority of cross-functional teams are dysfunctional](https://hbr.org/2015/06/75-of-cross-functional-teams-are-dysfunctional), in large part because siloes self-perpetuate. The authors argue the solution is to create a “Portfolio Governance Team (PGT), where high-level leaders make complex decisions on the various projects in their portfolio together.\" The number one rule for making a PGT successful? \"Every project should have an end-to-end accountable leader.\"\n\n### The fix\n\nAlong these lines, one long-term solution being discussed at GitLab is establishing a dedicated team that will transcend the product areas and be responsible for these murky in-between issues. But Director of Engineering, Dev Backend [Tommy Morgan](/company/team/#itstommymorgan) adds, “Even if we had a team that was in place to handle issues like this one, there will always be boundary conditions. 
As Product is responsible for prioritizing work, if we need to do any horse-trading or other determination to figure out where the work should land, I think that's something that Product should work out.”\n\nShort of creating a new team, Product Managers and Engineering Managers will need to frankly discuss their own priorities and incentives in order to get these tasks scheduled.\n\nWhat has your org tried? Is it working? Tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Photo](https://unsplash.com/photos/fIq0tET6llw) by [Diego PH](https://unsplash.com/@jdiegoph) on Unsplash.\n{: .note}\n",[696,722,790,9],{"slug":2163,"featured":6,"template":699},"how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team","content:en-us:blog:how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","How Do We Handle Engineering Led Initiatives That Dont Belong To One Team","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team.yml","en-us/blog/how-do-we-handle-engineering-led-initiatives-that-dont-belong-to-one-team",{"_path":2169,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2170,"content":2176,"config":2181,"_id":2183,"_type":13,"title":2184,"_source":15,"_file":2185,"_stem":2186,"_extension":18},"/en-us/blog/how-gitlab-handles-retrospectives",{"title":2171,"description":2172,"ogTitle":2171,"ogDescription":2172,"noIndex":6,"ogImage":2173,"ogUrl":2174,"ogSiteName":685,"ogType":686,"canonicalUrls":2174,"schema":2175},"How GitLab handles retrospectives","Take a peek at how the GitLab team holds monthly retrospectives.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668426/Blog/Hero%20Images/retrospectivesgitlabpost.jpg","https://about.gitlab.com/blog/how-gitlab-handles-retrospectives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab handles retrospectives\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-12-19\",\n      }",{"title":2171,"description":2172,"authors":2177,"heroImage":2173,"date":2178,"body":2179,"category":300,"tags":2180},[852],"2019-12-19","\nEach month, GitLab’s engineering team hold a retrospective to learn and improve as much as possible from [every monthly release](/releases/). Retrospectives are the preferred method for GitLab team members [focused on improvement](https://handbook.gitlab.com/handbook/values/#focus-on-improvement) and ensures that our technical debt doesn’t grow faster than our code base.\n\n> “When we say retrospective, here’s what we have in mind: A special meeting where the team gathers after completing an increment of work to inspect and adapt their methods and teamwork. Retrospectives enable whole-team learning, act as catalysts for change, and gener-ate action. Retrospectives go beyond checklist project audits or per-functory project closeouts. And, in contrast to traditional post-mortems or project reviews, retrospectives focus not only on the development process, but on the team and team issues. And team issues are as challenging as technical issues – if not more so.” — [Agile retrospectives: Making good teams great](https://www.amazon.com/Agile-Retrospectives-Making-Pragmatic-Programmers-ebook/dp/B00B03SRJW)\n\nSince retrospectives can cultivate a culture of transparency, trust, collaboration, and communication, we want to share the steps our team takes in order to ship every month.\n\n## Engineering-wide retrospectives\n\nIn line with our [value of transparency](https://handbook.gitlab.com/handbook/values/#transparency), we livestream the meetings to YouTube and monitor the chat for questions from viewers. 
We also have a publicly available document for [retrospective notes](https://docs.google.com/document/d/1nEkM_7Dj4bT21GJy0Ut3By76FZqCfLBmFQNVThmW2TY/edit?usp=sharing) so that we can [efficiently refer back to decisions, insight, and comments](https://handbook.gitlab.com/handbook/values/#write-things-down).\n\nIn each retrospective, the team methodically works through the same discussion points:\n\n1. **Previous retrospective improvement tasks**: The moderator reviews the improvements the team identified in the last retrospective and discuss progress on those items.\n1. **What went well this month**: Teams are encouraged to celebrate the ways in which we exceeded expectations either individually or as a team.\n1. **What went wrong this month**: Teams are encouraged to identify mistakes and unmet goals. The focus is to highlight areas that didn’t meet our expectations as a team.\n1. **How we can improve**: The team engages in [blameless problem solving](https://handbook.gitlab.com/handbook/values/#blameless-problem-solving) to identify how subsequent releases can improve. Are there changes we can make in workflow? Is there a potential silo forming? Do changes need to be made in communication and collaboration?\n1. **Improvements for next release to track**: At the end of each retrospective, the [Engineering Productivity team](/handbook/engineering/quality/#engineering-productivity) triages improvement items identified during the retrospective. Having a single owner of triaging enables the awareness of the bigger picture technical debt and backstage work required. The individuals issues are assigned to other teams or engineers to execute.\n\nRetrospectives are publicly live streamed each month. Take a look at the retrospective for 12.5. 
🍿\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/tkwo9xisg90\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Team retrospectives\n\n[Team retrospectives](/handbook/engineering/management/group-retrospectives/) are held to inform the function-wide retrospective for any given release. [Sean McGivern](/company/team/#smcgivern), engineering manager, Plan:Project Management, wrote a [great article on using GitLab CI to automate monthly releases](/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives/). Using scheduled pipelines to create an issue early in the release cycle, teams can contribute to the retro issue while they’re still working on the release.\n\n> “It doesn’t matter whether you have four or five labels of things on your retro board, or exactly how you do the retro. What does matter is the notion of thinking about what we're doing and how we can do better, and it is the team that’s doing the work that does this, that is the central thing.” — [Martin Fowler](https://martinfowler.com/articles/agile-aus-2018.html)\n\nIn this [team retrospective issue](https://gitlab.com/gl-retrospectives/create-stage/editor/issues/6), the Create:Editor team takes a look at what went well, what didn’t go well, what can be improved, which issues shipped, and which issues slipped in the [12.5 release](/blog/gitlab-12-5-released). In team retro issues, individuals can gauge how others experienced the release and discuss points raised by teammates.\n\nHere’s a video of the [Plan team](https://handbook.gitlab.com/handbook/product/categories/#plan-stage) holding a retrospective to discuss recent slipped issues. 
🍿\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/QA3LlJOi0Ik\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Asynchronous retrospectives\n\nSince GitLab is an [all-remote company](/company/culture/all-remote/), we strongly encourage [asynchronous communication](https://handbook.gitlab.com/handbook/communication/#internal-communication), since team members can be located in any of the [65 countries](/company/team/) where GitLab team members live. We apply asynchronous communication to our retrospectives to ensure that everyone can contribute and document their experiences.\n\nAsynchronous retrospectives are not just for remote teams. They can also be used by colocated teams to facilitate open discussion when team members have the bandwidth to dedicate to problem solving. Furthermore, asynch retros can serve as a dedicated place where people can quickly jot down their thoughts when they suddenly remember an experience, rather than be forced to remember everything during a dedicated call.\n\n## Retrospectives: The impact on culture\n\n[When retrospectives are run efficiently](/handbook/engineering/management/group-retrospectives/), these meetings can build mutual trust, communication, and collaboration. They cultivate a culture of continuous learning and improvement, two characteristics of any strong Agile team. 
The more teams can determine how to efficiently build software, the more value they can bring to users.\n\nCover image by [Shane Aldendorff](https://unsplash.com/@pluyar?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/mQHEgroKw2k).\n{: .note}\n",[744,696,9],{"slug":2182,"featured":6,"template":699},"how-gitlab-handles-retrospectives","content:en-us:blog:how-gitlab-handles-retrospectives.yml","How Gitlab Handles Retrospectives","en-us/blog/how-gitlab-handles-retrospectives.yml","en-us/blog/how-gitlab-handles-retrospectives",{"_path":2188,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2189,"content":2195,"config":2201,"_id":2203,"_type":13,"title":2204,"_source":15,"_file":2205,"_stem":2206,"_extension":18},"/en-us/blog/how-to-automate-creation-of-runners",{"title":2190,"description":2191,"ogTitle":2190,"ogDescription":2191,"noIndex":6,"ogImage":2192,"ogUrl":2193,"ogSiteName":685,"ogType":686,"canonicalUrls":2193,"schema":2194},"How to automate the creation of GitLab Runners","Follow this step-by-step guide for automating runner setup using new runner creation workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/how-to-automate-creation-of-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate the creation of GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":2190,"description":2191,"authors":2196,"heroImage":2192,"date":2198,"body":2199,"category":832,"tags":2200},[2197],"Darren Eastman","2023-07-06","Automating the creation of GitLab Runners is an essential tactic in\noptimizing the operations and management of a runner fleet. 
Since announcing\nthe [deprecation and planned removal of the legacy runner registration\ntoken](https://docs.gitlab.com/ee/architecture/blueprints/runner_tokens/#next-gitlab-runner-token-architecture)\nlast year, there have been various questions by customers and the user\ncommunity regarding the impact of the new workflow on any automation they\nrely on for creating and registering runners. This is a step-by-step guide\nfor automating runner setup using the new runner creation workflows as\ndepicted in the sequence diagram.\n\n\n![GitLab Runner create - sequence\ndiagram](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_create_sequence_diagram.png){:\n.shadow}\n\n\n## New terminology and concepts\n\nBefore we dive into the automation steps, let’s first review a few new\nconcepts with the runner creation process and how that differs from the\nregistration token-based method. With the `registration token` method, a\n`registration token` is available for the instance, for each group, and for\neach project. Therefore, in a large GitLab installation, with many groups,\nsub-groups, and projects, you can have tens of hundreds of registration\ntokens that any authorized user can use to connect a runner. There are two\nsteps to authorizing a runner (the application that you install on a target\ncomputing platform) to a GitLab instance:\n\n1. Retrieve a registration token.\n\n2. 
Run the register command in the runner application using the previously\nretrieved registration token.\n\n\nThe workflow images below depict the runner setup steps using the\nregistration token compared with the new runner creation process.\n\n\n![GitLab Runner registration\nworkflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_registration_workflows.png){:\n.shadow}\n\n\n### Reusable runner configurations\n\nNow, in the registration token method, if you authenticated multiple runners\nusing the same registration token (a valid use case), each runner entity\nwould be visible in the UI in a separate row in the list view. The new\ncreation method introduces the concept of a reusable runner configuration.\nFor example, if you have to deploy multiple runners at the instance level,\neach with the same configuration (executor type, tags, etc.), you simply\ncreate a runner and configuration **once**, then register each individual\nrunner with the same authentication token that you retrieved from the first\nrunner creation. Each of these runners is now displayed in the UI in a\nnested hierarchy.\n\n\n![Runner detailed view with shared\nconfigurations](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_detail_shared_configs.png){:\n.shadow}\n\n\nWe heard from many of you that your Runners view was cluttered because each\nrunner created received its own row in the table, even if they were the\nexact same configuration as 100 others. With this change, our intent is to\nensure that you have the flexibility you need to configure a runner fleet at\nscale while ensuring that you can still easily understand and manage the\nfleet in the GitLab Runners view. 
We understand that this is a paradigm\nshift that may take some getting used to.\n\n\n## Automation steps for creating a runner\n\nHere are the automation steps to create a runner.\n\n\n### Step 1: Create an access token\n\nYou will first need to create an access token. A [personal access\ntoken](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\nfor an administrator account will allow you to create runners at the\ninstance, group, and project levels.\n\n\nIf you only need to create a group or project runner, then it is best to use\na group access token or project access token, respectively. For a group or\nproject, navigate to `Settings / Access Tokens` and create a token. You must\nspecify a name, the token expiration date, role, and scope. For the role,\nselect `Owner`; for the scopes, select `create_runner`.\n\n\nNote: The access token is only visible once in the UI. You will need to\nstore this token in a secure location - for example, a secrets management\nsolution such as [Hashicorp\nVault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/)\nor the [Keeper Secrets Manager Terraform\nplugin](https://docs.keeper.io/secrets-manager/secrets-manager/integrations/terraform).\n\n\n![GitLab Runner registration\nworkflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/project_access_token.png){:\n.shadow}\n\n\n### Step 2: Use the access token to create a runner in the GitLab instance\n\nNow that you have an access token scoped to the instance, group, or project,\nthe next step is to use that token to create a runner automatically. 
In this\nexample, we will simply invoke a POST REST endpoint in a terminal using\nCURL.\n\n\n```\n\ncurl -sX POST https://gitlab.example.com/api/v4/user/runners --data\nrunner_type=group_type --data \"group_id=\u003Ctarget_group_or_project_id>\" --data\n\"description=software-eng-docker-builds-runner\" --data \"tag_list=\u003Cyour\ncomma-separated tags>\" --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\"\n\n```\n\n\nOnce this step is complete, the newly created runner configuration is\nvisible in the GitLab UI. As the actual runner has not yet been configured,\nthe status displayed is `Never contacted`.\n\n\nThe API will return a message with the following fields: `id`, `token`, and\n`token_expires_at`. You must save the value for the `token` as it will only\nbe displayed once. \n\n\nAs mentioned above, a critical point to note in the new runner creation is\nthat you can reuse the runner token value to register multiple runners. If\nyou choose to do that, runners created with the same token will be grouped\nin the Runners list. Whichever runner contacted GitLab most recently will be\nthe one whose unique data (IP address, version, last contact time and\nstatus) displays in the list. You can still view all the runners in that\ngroup _and_ compare all of their unique data by going to the details page\nfor that runner. Each runner in the group is uniquely identified by their\n`system_id`.\n\n\nAt this point, you might ask yourself, what’s the difference between this\nnew workflow and the workflow that relies on the registration token? The\nbenefits are:\n\n1. You can now quickly identify the user that created a runner\nconfiguration. Not only does this add a layer of security compared to the\nold method, but it also simplifies troubleshooting runner performance\nissues, especially when your fleet expands.\n\n1. 
Only the creator of the runner or administrator(s) can edit crucial\nconfiguration details like tags, the ability to run untagged jobs, the\nsetting to lock to only run jobs in the current projects it is shared with,\nand more.\n\n\n## Automation of runner install and registration\n\nWith the runner configuration creation steps completed, you now have a\nrunner or runners configured in your GitLab instance and valid runner tokens\nthat you can use to register a runner. You can manually install the runner\napplication to a target compute host or automate the runner application\ninstallation. If you plan to host the runner on a public cloud virtual\nmachine instance – for example, [Google Cloud Compute\nEngine](https://cloud.google.com/compute/docs/instances) – then a good\n[example\npattern](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1932#note_1172713979)\nprovided by one of our customers for automating the runner install and\nregistration process is as follows:\n\n1. Use [Terraform infrastructure as\ncode](https://docs.gitlab.com/ee/user/infrastructure/iac/) to install the\nrunner application to a virtual machine hosted on GCP.\n\n1. Use the [GCP Terraform\nprovider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance)\nand specifically the `metadata` key to automatically add the runner\nauthentication token to the runner configuration file on the newly created\nGCP virtual machine.\n\n1. 
Register the newly installed runner with the target GitLab instance using\na [cloud-init](https://cloudinit.readthedocs.io/en/latest/index.html#)\nscript populated from the GCP terraform provider.\n\n\n**Example cloud-init script**\n\n\n```shell\n\n#!/bin/bash\n\napt update\n\n\ncurl -L\n\"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\"\n| bash\n\nGL_NAME=$(curl 169.254.169.254/computeMetadata/v1/instance/name -H\n\"Metadata-Flavor:Google\")\n\nGL_EXECUTOR=$(curl\n169.254.169.254/computeMetadata/v1/instance/attributes/gl_executor -H\n\"Metadata-Flavor:Google\")\n\napt update\n\napt install -y gitlab-runner\n\ngitlab-runner register --non-interactive --name=\"$GL_NAME\"\n--url=\"https://gitlab.com\" --token=\"$RUNNER_TOKEN\"\n--request-concurrency=\"12\" --executor=\"$GL_EXECUTOR\"\n--docker-image=\"alpine:latest\"\n\nsystemctl restart gitlab-runner\n\n```\n\n\n## What's next?\n\nSo there you have it, an overview of how to automate runner creation,\ninstallation, and registration. To summarize in three simple steps:\n\n1. Use the API to create a runner token and configuration.\n\n1. Store the retrieved authentication token in a secrets management\nsolution.\n\n1. 
Use infrastructure as code to install the runner application on a target\ncompute host.\n\n\nOur long-term vision is to directly incorporate this automation lifecycle\ninto the product to simplify your day-to-day runner fleet management\noperations.\n",[1035,9,1074],{"slug":2202,"featured":6,"template":699},"how-to-automate-creation-of-runners","content:en-us:blog:how-to-automate-creation-of-runners.yml","How To Automate Creation Of Runners","en-us/blog/how-to-automate-creation-of-runners.yml","en-us/blog/how-to-automate-creation-of-runners",{"_path":2208,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2209,"content":2215,"config":2222,"_id":2224,"_type":13,"title":2225,"_source":15,"_file":2226,"_stem":2227,"_extension":18},"/en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"title":2210,"description":2211,"ogTitle":2210,"ogDescription":2211,"noIndex":6,"ogImage":2212,"ogUrl":2213,"ogSiteName":685,"ogType":686,"canonicalUrls":2213,"schema":2214},"How to prevent broken master with merge trains & pipelines","Do you still run pipelines on source branches? Let's start running them on merge commits!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678366/Blog/Hero%20Images/merge-train.jpg","https://about.gitlab.com/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to avoid broken master with Pipelines for Merged Results and Merge Trains\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Shinya Maeda\"}],\n        \"datePublished\": \"2019-09-11\",\n      }",{"title":2216,"description":2211,"authors":2217,"heroImage":2212,"date":2219,"body":2220,"category":832,"tags":2221},"How to avoid broken master with Pipelines for Merged Results and Merge Trains",[2218],"Shinya Maeda","2019-09-11","\nBroken master. 
This can happen when CI pipelines run on the master branch (or default branch), but don't\npass all tests. A red cross mark is shown in the project's top page, signalling unstable source\ncode and eroding the trust of users. Broken master could also be a blocker against\na continuous deployment/delivery stream line in which deployment jobs\nare executed after the test stage passed in master pipelines.\n\nAll maintainers want to avoid this critical state,\nbut how can we prevent it?\n\n## Let's look at how master is broken in the first place\n\nLet's say you're one of the maintainers of a project. It's a busy repository with hundreds of merges\nto master every day. A developer assigns a merge request (MR) to you. The MR passed all of the tests in the CI pipelines,\nhas been reviewed thoroughly by code reviewers, all open discussions have been resolved, and the MR has been\napproved by the relevant [code owners](https://docs.gitlab.com/ee/user/project/codeowners/).\n\nYou would press the \"Merge\" button without a second thought, but how are you confident that\na pipeline running on master branch after the merge will pass all tests again?\nIf your answer is \"It might break the master branch,\" then\nyou're right. This could happen, for example, if master has advanced by some\nnew commits, and one of them changed a lint rule. The MR in question\nstill contains an invalid coding style, but the latest pipeline on the MR passes,\nbecause the feature branch is based on an old version of master.\n\nEnter two new GitLab features: [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nand [Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html).\nLet me show you how they works and how to enable them.\n\n## How to continually run CI pipelines on the merge commit\n\nLet's break down what went wrong in the scenario above. 
Even though the pipeline on the\nmerge request passed all the tests, it ran on a source (feature) branch\nwhich could be based on an outdated version of master. In such a case,\nthe result of pipeline is considered as _untrusted_, because there may be a huge difference\nbetween an actual-and-future merge commit and the commit in question.\n\nAs a [boring solution](https://handbook.gitlab.com/handbook/values/#boring-solutions), developers can continually rebase their MR\non the latest master, but this is annoying and inefficient, given the speed of\ngrowth of the master branch.\nIt causes a lot of friction between developers and maintainers, slowing down the development cycle.\n\nTo address this problem, we introduced [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html)\nin [GitLab 11.10](/releases/2019/04/22/gitlab-11-10-released/#pipelines-for-merged-results).\n\nSimply put, the main difference between pipelines for merged results and normal pipelines is that\n**pipelines run on merge commits, instead of source branches, before the actual merge happens**.\nThis merge commit is generated from the latest commits of target branch and\nsource branch and written in a temporary place (`refs/merge-requests/:iid/merge`).\nTherefore, we can run a pipeline on it without interfering with master.\n\nHere is a sample workflow with the above scenario:\n\n1. A developer pushes a new commit to a merge request.\n1. GitLab creates a merge commit from the HEAD of the source branch and HEAD of the target branch.\n   This merge commit is written in `refs/merge-requests/:iid/merge` and does not change commit history of master branch.\n1. GitLab creates a pipeline on the merge commit, but this pipeline fails because the latest master changed a lint rule.\n1. 
A maintainer sees a failed pipeline in the merge request.\n\nAs you can see, the maintainer was able to hold off merging the dangerous MR\nbecause the latest pipeline on the MR didn't pass. The feature actually saved\nmaster from a broken state.\n\nAs a bonus, this workflow freeds developers from continual\nrebasing of their merge requests.\nAll they need to do is develop features with [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html).\nGitLab automatically creates an expected merge commit and validates the merge request prior to\nan actual merge.\n\n### How to get started with Pipelines for Merged Results\n\nYou can [start using this feature](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results)\ntoday, with just two steps:\n\n1. Edit the `.gitlab-ci.yml` config file to enable [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/merge_request_pipelines/).\n1. Enable the \"Merge pipelines will try to validate the post-merge result prior to merging\" option at **Settings > General > Merge requests** in your project.\n\n**Note:** If the configurations in your `.gitlab-ci.yml` file are too complex, you might stumble at the first point.\nWe're currently working on [improving the usability of pipelines for merge requests / merge request pipelines](https://gitlab.com/gitlab-org/gitlab-ce/issues/60085).\nPlease leave your feedback in the issue if that's the case.\n\n## How to avoid race condition of concurrent merges\n\nWith [Pipelines for Merged Results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html),\nwe can confidently say that MRs are continually tested against the latest master branch.\nHowever, what if multiple MRs have been merged at the same time?\nFor example:\n\n- There are two merge requests: MR-1 and MR-2. 
The latest pipelines have already passed in both MRs.\n- John (maintainer) and Cathy (maintainer) merge MR-1 and MR-2 at the same time, respectively.\n\nLater on, it turns out that MR-2 contains a coding offence which has just been introduced by MR-1.\nMaintainers hit merge without knowing that, and\nneedless to say, this will result in broken master. How can we handle this race condition properly?\n\nIn [GitLab 12.1](/releases/2019/07/22/gitlab-12-1-released/#parallel-execution-strategy-for-merge-trains), we introduced a new feature,\n[Merge Trains](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/).\nBasically, a Merge Train is a queueing system that allows you to avoid this kind\nof race condition.\nAll you need to do is add merge requests to the merge train, and it\nhandles the rest of the work for you.\nIt creates merge commits according\nto the sequence of merge requests and runs pipelines on the expected merge commits.\nFor example, John and Cathy could have avoided broken master with the following workflow:\n\n1. John and Cathy add MR-1 and MR-2 to their [Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/), respectively.\n1. In MR-1, the Merge Train creates an expected merge commit from HEAD of the source branch and HEAD of the target branch.\n   It creates a pipeline on the merge commit.\n1. In MR-2, the Merge Train creates an expected merge commit from HEAD of the source branch and the expected merge commit of MR-1.\n   It creates a pipeline on the merge commit.\n1. The pipeline in MR-1 passes all tests and merged into master branch.\n1. The pipeline in MR-2 fails because it violates a lint check which was changed by MR-1. MR-2 is dropped from the Merge Train.\n1. 
Developer revisits MR-2, fixes the coding offence, and asks Cathy to add it to the Merge Train again.\n\nAs you can see, the Merge Train successfully rejected MR-2 before it could break the master\nbranch. With this workflow, maintainers can feel more confident when they\ndecide to merge something. Also, this doesn't slow down development lifecycle\nthat pipelines are built on optimistic assumption that, in the above case,\nthe pipeline in MR-1 and the pipeline in MR-2 **start almost simultaneously**.\nMR-2 builds a merge commit as if MR-1 has already been merged, so that maintainers\ndon't need to wait for long time until each pipeline finished. If one of the\npipelines failed, the problematic merge request is dropped from the merge train\nand the train will be reconstructed without it.\n\n### How to get started with Merge Trains\n\nYou can [start using Merge Train](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train)\ntoday, if you've already enabled [Pipelines for merged results](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/index.html#enabling-pipelines-for-merged-results). 
Click [\"Start/Add merge train\" button](https://docs.gitlab.com/ee/ci/merge_request_pipelines/pipelines_for_merged_results/merge_trains/index.html#how-to-add-a-merge-request-to-a-merge-train) in merge requests.\n\n## A quick demonstration of Merge Trains\n\nHere is a demonstration video that explains the advantage of Merge Train feature.\nIn this video, we'll simulate the common problem in a workflow without\nMerge Trains, and later, we resolve the problem by enabling a Merge Train.\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/D4qCqXgZkHQ\" frameborder=\"0\" allowfullscreen=\"true\">\n\u003C/iframe>\n\u003C/figure>\n\n## Wrap up\n\nRunning pipelines on expected merge commits allows us to predict what will happen\nin the future and avoid broken master proactively. It soothes the headache of\nrelease managers and gives maintainers and developers more confidence that their code\nis reliable enough to be merged and shipped. In addition, Merge Trains allow you\nto merge things safely without slowing down the development cycle.\n\nGive this advanced CI/CD feature a try today!\n\nFor more information, check out [the documentation on merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html) and [pipelines for merge requests / merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nCover image by [Dan Roizer](https://unsplash.com/@danny159) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[108,974,9,834],{"slug":2223,"featured":6,"template":699},"how-to-avoid-broken-master-with-pipelines-for-merge-requests","content:en-us:blog:how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","How To Avoid Broken Master With Pipelines For Merge 
Requests","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests.yml","en-us/blog/how-to-avoid-broken-master-with-pipelines-for-merge-requests",{"_path":2229,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2230,"content":2236,"config":2241,"_id":2243,"_type":13,"title":2244,"_source":15,"_file":2245,"_stem":2246,"_extension":18},"/en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps",{"title":2231,"description":2232,"ogTitle":2231,"ogDescription":2232,"noIndex":6,"ogImage":2233,"ogUrl":2234,"ogSiteName":685,"ogType":686,"canonicalUrls":2234,"schema":2235},"How to harmonize Agile sprints with product roadmaps","Apply best practices and GitLab features to your product journey, including creating centralized roadmaps, conducting review sessions, and tracking sprint lifecycles.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097231/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2821%29_2pdp2MNB7SoP4MhhiI1WIa_1750097230664.png","https://about.gitlab.com/blog/how-to-harmonize-agile-sprints-with-product-roadmaps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to harmonize Agile sprints with product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2025-02-04\",\n      }",{"title":2231,"description":2232,"authors":2237,"heroImage":2233,"date":2238,"body":2239,"category":1506,"tags":2240},[1880],"2025-02-04","Picture this: Product and Development teams are working in isolation. Product has created a 12-month roadmap and communicated it to internal stakeholders but didn't review it with their development team. 
Dev starts building the features planned for the upcoming sprint without considering the broader product roadmap, leading to missed opportunities to optimize timing, like running projects in parallel, accounting for team capacity, or building reusable APIs that could serve multiple initiatives. The lack of coordination results in inefficiencies and delayed value delivery.\n\nBalancing short-term wins with long-term vision isn’t easy; it requires clear communication, aligned priorities, and the right tools. In this guide, you'll learn strategies to help harmonize your Agile sprints with strategic roadmaps, tackle common challenges, and uncover actionable solutions tailored to your teams.\n\n## The importance of a single source of truth\n\nA consistent single source of truth for roadmaps with longer-range goals ensures you and your teams have access to up-to-date information about the bigger picture. In practice, this means maintaining a single, regularly updated platform where all roadmap details reside rather than keeping versions of the roadmap across multiple formats, each typically with slightly different information, causing a misaligned understanding of where you're headed.\n\n### Create a centralized roadmap\n\nBy creating a centralized roadmap for your team, you can:\n\n* communicate long-range strategy\n* minimize miscommunication\n* facilitate cross-functional alignment\n* quickly adapt to changes without losing context\n* self-serve information, reducing dependency on a single point of contact who retains the information\n\n***GitLab tip**: Use [epics](https://docs.gitlab.com/ee/user/group/epics/) and [Roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/) to support both product planning and the transparent monitoring of delivery. 
The Roadmap view allows you to track progress, identify bottlenecks, and ensure alignment between high-level goals and sprint-level execution.*\n\n![Roadmap view for group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097239117.png)\n\n## Collaborative roadmap review practices\n\nEstablish a regular review and sign-off process for roadmap updates that include Product, Engineering, and UX as part of the [product trio](https://www.producttalk.org/product-trio/). Collaborative reviews help you maintain alignment and minimize risk. At GitLab, I meet with my engineering manager and UX designer monthly to review and obtain sign-offs on any changes. We maintain a running sign-off on the roadmap wiki page itself that holds us accountable for keeping the schedule and provides transparency to the rest of the organization.\n\n#### How to extract value from review sessions\n\nTo make the most of the review session, aim for the following best practices:\n\n* Schedule routine reviews, monthly or quarterly, depending on how frequently the roadmap tends to fluctuate at your organization.\n* Validate alignment between product goals, UX lead time, and technical feasibility by discussing potential risks and dependencies upfront.\n  * Validate that the roadmap reflects current organizational business objectives.\n  * Ensure that design timelines are realistic and consider research or validation needs.\n  * Confirm that the roadmap allocates time for technical preparation, such as technical spikes or investigations, and ensures alignment with broader engineering priorities.\n* Optimize team utilization by considering capacity constraints and ensuring the sequence of work aligns with the team’s skill profile. 
This includes avoiding periods of underutilization or skill mismatches while effectively planning for situations like staffing level drops during holidays.\n* Right-size scope and set appropriate expectations about what can be achieved. We all want to do it all, but perfection is the enemy of progress so prioritize what truly matters to deliver incremental value efficiently. Seek opportunities to optimize by identifying ways to iterate or increase velocity, such as adjusting the order of work to reduce dependencies or leveraging reusable components to streamline development.\n* Encourage open dialogue about trade-offs and priorities to ensure all perspectives are considered. This collaborative approach helps identify creative solutions to challenges and builds consensus on the best path forward.\n\n***GitLab tip**: Use a [GitLab Wiki](https://docs.gitlab.com/ee/user/project/wiki/) page to complement the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature. In the wiki, you can include expanded context about your product roadmap, such as business rationale, links to user research, RICE scores, and details about dependencies or risks. Link directly to the roadmap for easy access, and leverage the upcoming discussion threads feature to encourage async collaboration and feedback from your team.*\n\n![PlanFlow product roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097239118.png)\n\n## Continuous direction validation and progress measurement\n\nThe goal of a product roadmap isn’t just to stay on track – it’s to deliver real value to your customers. To make space for sharing ongoing user feedback and behavioral data consider incorporating regular touchpoints across your product trio outside of sprint cycles. These sessions can be used to review insights, analyze trends, and ensure that the product roadmap continues to reflect the evolving needs of your users. 
By grounding roadmap updates using real user insights, you’re not only delivering on outcomes but also adapting to what really matters to your customers.\n\nThe value you ship might come in the form of improved usability, reduced technical debt, or entirely new capabilities. When the product trio is aligned on the roadmap vision, they’re also aligned on the outcomes you’re working to achieve.\n\nTo measure whether you’re on track to deliver those outcomes, you need to closely scope the intended results. Scope creep, like late user story additions, can delay your ability to ship value. Additionally, it’s important to identify work that was delivered but doesn’t align with the roadmap and understand why.\n\n### Sprint planning\n\nRemaining aligned with your product roadmap starts with thoughtful sprint planning. Here are some best practices to keep your team on track and focused on delivering value:\n\n* Clearly define, and narrowly scope, desired outcomes to ensure high confidence in delivery.\n* Identify potential late additions or adjustments that could delay delivery, and build in buffers to maintain focus.\n* Align on the sequence of work with your team to optimize for capacity, skill profiles, and reducing dependencies.\n* To maintain focus and improve confidence of delivering on time, avoid planning to 100% of the team’s capacity. Leave room (10%-20%) for unknowns or new discoveries that may surface during the sprint.\n\n### During the sprint\n\nStaying aligned with your roadmap during the sprint requires focus, communication, and constant evaluation. 
While delivering value is the goal, it’s equally important to ensure the work in progress aligns with the outcomes you’ve scoped and planned.\n\n* Continuously validate the work in progress against roadmap outcomes to ensure every sprint contributes to the bigger picture.\n* Encourage the team to regularly check if they’re still working toward the intended goals and outcomes.\n* Maintain open communication throughout the sprint. Use daily standups or async updates to surface risks, unplanned work, or dependencies early and adjust where necessary.\n* Be ruthless about protecting the sprint. While the urge to solve emerging problems is natural, unplanned work should be carefully evaluated to avoid derailing agreed-upon priorities.\n* Proactively manage scope creep. If new work surfaces mid-sprint, assess whether it aligns with the current roadmap outcome’s narrowly scoped focus. While additional ideas or features may align conceptually with the broader outcome, they may not fit into the immediate plan to deliver value as soon as possible. Document these suggestions and evaluate if they should be considered as part of future iterations or as a nice-to-have for the future, rather than introducing them into the current sprint and delaying agreed-upon priorities.\n\n### Sprint retros\n\nIn your sprint retrospectives, take time to reflect with your team on how well you are collectively progressing toward your desired outcomes. Questions to ask:\n\n* Did any unplanned work get introduced during the sprint that delayed your ability to deliver value? Identify why it happened and what adjustments can be made.\n* Did you deliver any work that deviated from the roadmap? Discuss what led to this and what you can learn for future planning.\n\nFrom sprint planning through retrospectives, staying focused on delivering tangible outcomes to users and stakeholders is a team responsibility. 
By aligning every step of the way, you ensure that your roadmap remains a clear guide for delivering value efficiently and consistently.\n\n***GitLab tip:** Use [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to visualize progress and detect deviations early, helping your team stay focused on delivering outcomes.*\n\n![Burndown chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097239120.png)\n\n## Delivering roadmap outcomes with confidence\n\nHarmonizing Agile sprints with strategic roadmaps requires intentionality, team buy-in, and the proper tools. By creating a roadmap single source of truth, fostering collaborative reviews, and measuring progress towards outcomes, you can align execution with vision. With GitLab’s robust planning features, teams can turn challenges into opportunities for innovation and growth.\n\nReady to align your sprints with your strategic roadmap? 
[Start a free trial of GitLab](https://about.gitlab.com/free-trial/) today and explore the tools that can help you deliver outcomes with confidence.\n\n## Learn more\n\n- [Agile planning content hub](https://about.gitlab.com/blog/categories/agile-planning/)\n- [GitLab’s new Planner role for Agile planning teams](https://about.gitlab.com/blog/introducing-gitlabs-new-planner-role-for-agile-planning-teams/)\n- [Get to know the GitLab Wiki for effective knowledge management](https://about.gitlab.com/blog/get-to-know-the-gitlab-wiki-for-effective-knowledge-management/)",[744,1035,9,495],{"slug":2242,"featured":90,"template":699},"how-to-harmonize-agile-sprints-with-product-roadmaps","content:en-us:blog:how-to-harmonize-agile-sprints-with-product-roadmaps.yml","How To Harmonize Agile Sprints With Product Roadmaps","en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps.yml","en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps",{"_path":2248,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2249,"content":2255,"config":2261,"_id":2263,"_type":13,"title":2264,"_source":15,"_file":2265,"_stem":2266,"_extension":18},"/en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently",{"title":2250,"description":2251,"ogTitle":2250,"ogDescription":2251,"noIndex":6,"ogImage":2252,"ogUrl":2253,"ogSiteName":685,"ogType":686,"canonicalUrls":2253,"schema":2254},"How to migrate GitLab groups and projects more efficiently","Learn about performance improvements to GitLab migration by direct transfer and what's next.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668760/Blog/Hero%20Images/migration2.jpg","https://about.gitlab.com/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate GitLab groups and projects more efficiently\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Magdalena Frankiewicz\"}],\n        \"datePublished\": \"2023-08-02\",\n      }",{"title":2250,"description":2251,"authors":2256,"heroImage":2252,"date":2258,"body":2259,"category":693,"tags":2260},[2257],"Magdalena Frankiewicz","2023-08-02","Migrating groups and projects using direct transfer enables you to easily move GitLab resources between GitLab instances using either the UI or API. In a [previous blog post](https://about.gitlab.com/blog/try-out-new-way-to-migrate-projects/), we announced the release of migrating projects as a beta\nfeature **available to everyone**. We described the benefits of the method and steps to try it out.\n\nSince then, we have made further improvements, especially focusing on [efficient](https://gitlab.com/groups/gitlab-org/-/epics/8983) and\n[reliable](https://gitlab.com/groups/gitlab-org/-/epics/8927) migrations for large projects. In this blog, we'll elaborate on these improvements, as well as their impact on the overall process and speed of migrations. We'll also discuss estimating the duration of migrations.\n\n## Imports of CI/CD pipelines\n### Problem: Timing out\nWe received [a bug report about imports of CI/CD pipelines timing out](https://gitlab.com/gitlab-org/gitlab/-/issues/365702) and realized we needed to refine the underlying migration process. We considered the root cause of the problem and possible solutions, and ran proofs of concept. 
We concluded that we should tackle the\nproblem of having one massive archive file for a project with a large number of a certain relation types (for example, pipelines).\n\n### What we improved\nTo fix the problem of timeouts, we decided to introduce batching to the process of exporting and importing relations (for example, merge requests or pipelines).\n\nBefore we could fully complete the [epic for introducing the batching](https://gitlab.com/groups/gitlab-org/-/epics/9036), we had to introduce a couple of other optimizations\nto the process of exporting CI/CD pipelines.\n\nIn GitLab 15.10, we started:\n- [preloading associations when exporting CI/CD pipelines](https://gitlab.com/gitlab-org/gitlab/-/issues/391593)\n- [exporting commit notes as a separate relation](https://gitlab.com/gitlab-org/gitlab/-/issues/391601)\n\nWith these optimizations, exporting CI/CD pipelines sped up considerably. That allowed for a large number of pipelines in a project to be successfully exported to an archive file and then imported on the destination instance. However, because we were finally importing the pipelines, the overall duration of the migration increased.\n\nIn GitLab 16.3, we are introducing [exporting and importing relations in batches](https://gitlab.com/groups/gitlab-org/-/epics/9036). This has two benefits:\n- improves migration performance by creating and transferring smaller archive files, instead of one file per relation. These files can be very big if a project has thousands of pipelines.\n- enables more parallelism. 
For example, the CI pipeline data is now split into multiple batches and concurrent Sidekiq jobs (assuming the Sidekiq workers are available on the destination instance, see below) import each batch.\n\nThis improvement is already available by default on GitLab.com.\n- **Users migrating from a self-managed GitLab instance to GitLab.com** have to have their self-managed instance on at least GitLab 16.2, where batched export is available, to benefit from this improvement.\n- **Users migrating from GitLab.com to a self-managed GitLab instance** have to have their self-managed instance on at least GitLab 16.2 and enable the `bulk_imports_batched_import_export` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html) to benefit from this improvement.\n\n## Can we estimate the duration of a migration?\nThis question has been asked time and again. The answer is that duration of migration with direct transfer depends on many different factors. Some of them are: \n\n- Hardware and database resources available on the source and destination GitLab instances. More resources on the source and destination instances can result in shorter migration duration because:\n  - the source instance receives API requests, and extracts and serializes the entities to export\n  - the destination instance runs the jobs and creates the entities in its database\n- Complexity and size of data to be exported. For example, imagine you want to migrate two different projects with 1,000 merge requests each. The two projects can take very different amounts of time to migrate if one of the projects has a lot more attachments, comments, and other items on the merge requests. Therefore, the number of merge requests on a project is a poor predictor of how long a project will take to migrate.\n\nThere’s no exact formula to reliably estimate a migration. 
However, we checked the duration of each job importing a project relation to share with you the average numbers, so you can get an idea of how long importing your projects might take. Here is what we found:\n\n- importing an empty project takes about 2.4 seconds\n- importing one MR takes about 1 second\n- importing one issue takes about 0.1 seconds\n\nYou can find more project relations and the average duration to import them in the table below.\n\n| Project resource type | Average time (in seconds) to import a single record |\n| ---- | ---- |\n| Empty project\t| 2.4 |\n| Repository | 20 |\n| Project attributes\t| 1.5 |\n| Members\t| 0.2 |\n| Labels\t| 0.1 |\n| Milestones\t| 0.07 |\n| Badges\t| 0.1 |\n| Issues\t| 0.1 |\n| Snippets\t| 0.05 |\n| Snippet repositories | 0.5 |\n| Boards\t| 0.1 |\n| Merge requests\t| 1 |\n| External pull requests\t| 0.5 |\n| Protected branches\t| 0.1 |\n| Project feature\t| 0.3 |\n| Container expiration policy\t| 0.3 |\n| Service desk setting\t| 0.3 |\n| Releases | 0.1 |\n| CI/CD pipelines\t| 0.2 |\n| Commit notes\t| 0.05 |\n| Wiki\t| 10 |\n| Uploads |\t0.5 |\n| LFS objects\t| 0.5 |\n| Design\t| 0.1 |\n| Auto DevOps\t| 0.1 |\n| Pipeline schedules\t| 0.5 |\n| References\t| 5 |\n| Push rule\t| 0.1 |\n\n## How can we migrate efficiently?\nWe also know what is needed to achieve the most efficient migration possible. \n\nA single direct transfer migration runs up to five entities (groups or projects) per import at a time, independent of the number of Sidekiq workers available on the destination instance. Importing five concurrent entities is the maximum allowed per migration by direct transfer. This limit has been set to not overload the source GitLab instance, because\nwe saw network timeouts from source instances when we removed this limitation.\n\nThat doesn't mean that if more than five Sidekiq workers are available on the destination instance that they won't be utilized during a migration. 
On the contrary, more Sidekiq\nworkers help speed up the migration by decreasing the time it takes to import each entity. Import of relations is distributed across multiple jobs and a single project entity\nhas over 30 relations to be migrated. [Exporting and importing relations in batches](https://gitlab.com/groups/gitlab-org/-/epics/9036) mentioned above results in even more\njobs to be processed by the Sidekiq workers. \n\nIncreasing the number of Sidekiq workers on the destination instance helps speed up the migration until the source instance hardware resources are saturated. For more information on\nincreasing the number of Sidekiq workers (increasing concurrency), see [Set up Sidekiq instance](https://docs.gitlab.com/ee/administration/sidekiq/#set-up-sidekiq-instance).\n\nThe number of Sidekiq workers on the source instance should at least be enough to export the five concurrent entities in parallel (for each running import). Otherwise, there will\nbe delays and potential timeouts as the destination is waiting for exported data to become available.\n\nDistributing projects in different groups helps to avoid timeouts. If several large projects are in the same group, you can:\n1. Move large projects to different groups or subgroups.\n1. Start separate migrations each group and subgroup.\n\nThe GitLab UI can only migrate top-level groups. Using the API, you can also migrate subgroups.\n\n## What's next for migrating by direct transfer?\nOf course, we're not done yet! We will continue to improve the direct transfer method, aiming towards coming out of beta and into general availability. Next, we are working on:\n\n- [Moving hardcoded limits of direct transfer to settings](https://gitlab.com/gitlab-org/gitlab/-/issues/384976) - Migration by direct transfer has some hardcoded limits that can be made configurable to allow self-managed GitLab administrators to tune them according to their needs. 
For GitLab.com, we could set these limits higher than their hardcoded setting.\n- [Removing a 90-minute export timeout](https://gitlab.com/gitlab-org/gitlab/-/issues/392725) - Removing this limit will allow exporting of even larger projects, because only projects that can be migrated in under 90 minutes are supported at the moment.\n\nMore details can be found on our [migrating by direct transfer roadmap direction page](https://about.gitlab.com/direction/manage/import_and_integrate/importers/). We are excited about this roadmap and hope you are too!\n\nWe want to hear from you. What's the most important missing piece for you? What else can we improve? Let us know\nin the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/284495) or [schedule time](https://calendly.com/gitlab-magdalenafrankiewicz/45mins) with the Import and Integrations group product manager, and we'll keep iterating!\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n\nCover image by [Adrien VIN](https://unsplash.com/fr/@4dr13nv1n?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/migration-birds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[834,1014,9,767],{"slug":2262,"featured":6,"template":699},"how-to-migrate-gitlab-groups-and-projects-more-efficiently","content:en-us:blog:how-to-migrate-gitlab-groups-and-projects-more-efficiently.yml","How To Migrate Gitlab Groups And Projects More Efficiently","en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently.yml","en-us/blog/how-to-migrate-gitlab-groups-and-projects-more-efficiently",{"_path":2268,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2269,"content":2274,"config":2280,"_id":2282,"_type":13,"title":2283,"_source":15,"_file":2284,"_stem":2285,"_extension":18},"/en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"title":2270,"description":2271,"ogTitle":2270,"ogDescription":2271,"noIndex":6,"ogImage":1793,"ogUrl":2272,"ogSiteName":685,"ogType":686,"canonicalUrls":2272,"schema":2273},"How to Read Open Source: Finding Middleman Callbacks","Open source empowers you to learn beyond existing documentation. Getting started can be confusing. 
This is a demonstration finding unlisted Middleman callbacks.","https://about.gitlab.com/blog/how-to-read-open-source-finding-middleman-callbacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to Read Open Source: Finding Middleman Callbacks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tyler Williams\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":2270,"description":2271,"authors":2275,"heroImage":1793,"date":2277,"body":2278,"category":1239,"tags":2279},[2276],"Tyler Williams","2021-01-20","        \n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Why read open source? \n\nWhen folks write about open source, I think there is a strong emphasis on contributing to open source projects, which makes sense. Many software professionals are excited to give back to the community. \n\nBut beyond adding to your favorite project, open source philosophies have a number of other benefits in our daily lives. In particular, I love open source because it allows me to learn more about my tools when the documentation is out of date, incomplete, or leaves me with additional questions from my own curiosity. \n\nThis happened to me recently when I was working on [https://about.gitlab.com](https://about.gitlab.com), a static site built with [Middleman](https://middlemanapp.com/). I needed to find a more comprehensive list of available [callbacks](https://middlemanapp.com/advanced/custom-extensions/#callbacks) in the Middleman lifecycle.\n\nI hope this blog post is helpful if you're looking for existing Middleman callbacks, or if you're getting started reading through the source code of your favorite open source tools. \n\n## The task at hand\n\nIf you're getting started reading open source, I find it helps to have a specific task. Any unfamiliar codebase can be challenging to navigate. Having a goal in mind narrows your focus. 
Here was my task for Middleman:\n\nI recently created a merge request to [add Webpack devServer to the local development environment](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71845). I had to modify some existing behavior of our Middleman preview server and wanted to use one of the lifecycle callbacks to modify the preview server's log output. \n\nHowever, the Middleman documentation does not currently list all available callbacks, nor where they happen in the lifecycle. The [extension docs](https://middlemanapp.com/advanced/custom-extensions/) say: \n\n> Middleman extensions are Ruby classes which can hook into various points of the Middleman system, add new features and manipulate content. This guide explains some of what's available, but you should read the Middleman source and the source of plugins like middleman-blog to discover all the hooks and extension points.\n\nI took them up on their advice and read through the [Middleman source code](https://github.com/middleman/middleman) to find the available callbacks. Here's what I found, and how I found them.\n\n## Callbacks available in Middleman Core\n\n1. `initialized`: called before config is parsed, and before extensions are registered\n1. `configure`: called to run any `configure` blocks (once for current environment, again for the current mode)\n1. `before_extensions`: called before the `ExtensionManager` is instantiated\n1. `before_instance_block`: called before any blocks are passed to the configuration context\n1. `before_sitemap`: called before the `SiteMap::Store` is instantiated, which initializes the sitemap\n1. `before_configuration`: called before configuration is parsed, mostly used for extensions\n1. `after_configuration`: called after extensions have worked\n1. `after_configuration_eval`: called after the configuration is parsed, before the pre-extension callback\n1. `ready`: called when everything is stable\n1. `before_build`: called before the site build process runs\n1. 
`after_build`: called after the builder is complete\n1. `before_shutdown`: called in the `shutdown!` method, which lets users know the application is shutting down\n1. `before`: called before Rack requests\n1. `before_server`: called before the `PreviewServer` is created\n1. `reload`: called before the new application is initialized on a reload event\n\n## How to find Middleman Callbacks\n\n1. [Clone](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html) the [Middleman repository](https://github.com/middleman/middleman) to your local machine. \n1. Open the Middleman directory in a text editor, IDE, or any tool that allows you to easily search through a folder's files for specific strings. \n1. Start with the [existing documentation](https://middlemanapp.com/advanced/custom-extensions/#callbacks) if it exists. Middleman lists the names of a few callbacks. You can search the directory for the string `after_configuration`.\n1. In this instance, you should be able to find that string used like so: `execute_callbacks(:after_configuration)`.\n1. You may also find it listed with other similar symbols in `middleman-core/lib/middleman-core/application.rb`. \n1. Read through the related blocks of code around these search results, you'll get some additional context for how they work, and you may find additional search terms that will be helpful.\n1. In the case of Middleman callbacks, you can continue to search for combinations of the `execute_callbacks` method with any callback listed in `middleman-core/lib/middleman-core/application.rb` to find where and when specific callbacks are used.\n\n## Contribute!\n\nWith this in-depth knowledge of your tool, you can be more productive at your day-to-day work, and you can contribute back to open source more easily. 
\n\nIn my case, I was able to identify the best callback for my usecase, and I now plan to check in with the Middleman team and ask if they would accept a contribution to their documentation with this information so it's easier for other folks to find in the future.\n",[721,1035,9],{"slug":2281,"featured":6,"template":699},"how-to-read-open-source-finding-middleman-callbacks","content:en-us:blog:how-to-read-open-source-finding-middleman-callbacks.yml","How To Read Open Source Finding Middleman Callbacks","en-us/blog/how-to-read-open-source-finding-middleman-callbacks.yml","en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"_path":2287,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2288,"content":2294,"config":2299,"_id":2301,"_type":13,"title":2302,"_source":15,"_file":2303,"_stem":2304,"_extension":18},"/en-us/blog/how-to-security-as-code",{"title":2289,"description":2290,"ogTitle":2289,"ogDescription":2290,"noIndex":6,"ogImage":2291,"ogUrl":2292,"ogSiteName":685,"ogType":686,"canonicalUrls":2292,"schema":2293},"Why implementing security as code is important for DevSecOps","We created a DevSecOps assessment to help your company level up its DevSecOps capabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663618/Blog/Hero%20Images/how-to-implement-security-as-code.jpg","https://about.gitlab.com/blog/how-to-security-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why implementing security as code is important for DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2020-03-12\",\n      }",{"title":2289,"description":2290,"authors":2295,"heroImage":2291,"date":2296,"body":2297,"category":718,"tags":2298},[1423],"2020-03-12","\n## What is security as code?\n\nSecurity as code is a driving force in the future of [application security](/topics/devsecops/).\nAccording to 
O’Reilly, [security as code is the practice of building security\ninto DevOps tools and workflows](https://www.oreilly.com/library/view/devopssec/9781491971413/ch04.html) by mapping out how changes to code and infrastructure\nare made and finding places to add security checks, tests, and gates without\nintroducing unnecessary costs or delays.\nDevelopers can define infrastructure using a\nprogramming language with infrastructure as code. The same needs to happen to bring security to the speed of DevOps.\n\nAt a basic level, security as code can be achieved by integrating security\npolicies, tests, and scans into the pipeline and code itself. Tests should be\nrun automatically on every code commit, with results made immediately available\nto developers for fixing. By bringing security scans to the code as it’s written,\nteams will save both time and money by streamlining the review process later in\nthe software development lifecycle (SDLC).\n\n## Why is it important?\n\nSecurity as code is key to shifting left and achieving [DevSecOps](/solutions/security-compliance/): It requires\nthat security be defined at the beginning of a project and codified for\nrepeated and consistent use. 
In this way, it gives developers a self-service\noption for ensuring their code is secure.\n\nPredefined security policies boost efficiency, and also allow for checks on\nautomated processes to prevent any mishaps in the deployment process (like\naccidentally taking down the whole infrastructure because a problem wasn’t\nidentified in a staging environment).\n\n## Six security as code capabilities to prioritize\n\nFrancois Raynaud, founder and managing director of [DevSecCon](https://www.devseccon.com/),\nsaid that [security as code is about making security more transparent and\ngetting security practitioners and developers to speak the same language](https://techbeacon.com/devops/devseccon-security-code-secure-devops-techniques-track).\nIn other words – security teams need to understand how developers work, and use that\ninsight to help developers build the necessary security controls into the SDLC.\nDevelopers can reciprocate by staying open-minded as they adopt new tools and\npractices to boost security during the development process. Here are six best\npractices and capabilities to build into your pipeline:\n\n1. Automate security scans and tests (such as [static analysis](https://docs.gitlab.com/ee/user/application_security/sast/),\n[dynamic analysis](https://docs.gitlab.com/ee/user/application_security/dast/),\nand penetration testing) within your pipeline so that they can be reused across\nall projects and environments.\n1. Build a continuous feedback loop by presenting results to developers, allowing\nthem to remediate issues while coding and learn best practices during the coding\nprocess.\n1. Evaluate and monitor automated security policies by building checks into the\nprocess. Verify that sensitive data and secrets are not inadvertently shared or published.\n1. Automate complex or time-consuming manual tests via custom scripts, with\nhuman sign-off on results if necessary. 
Validate the accuracy and efficiency of\ntest scripts so that they can be replicated across different projects.\n1. Test new code within a staging environment to allow for thorough security and\nlow-impact failure, and test on every code commit.\n1. Scheduled or continuous monitoring should automatically create logs (or red\nflags) within a review dashboard (such as GitLab’s [Security Dashboard feature](https://docs.gitlab.com/ee/user/application_security/security_dashboard/index.html)).\n\n## Security as code is a best practice for a bigger goal\n\nSecurity as code gives pragmatic meaning to the concept of DevSecOps, but it\nshould not be your end goal. Ultimately, security as code is a means to get more people on board with integrating security throughout your\nSDLC. The idea will feel familiar to developers who\nhave practiced infrastructure as code, and it provides an opportunity for\nsecurity to step into the fray both to better understand software development\nand to help design the policies that will be codified in the process.\n\nAs your team works its way toward becoming a well-oiled DevSecOps machine,\nsecurity as code will inevitably present itself as a smart solution within a complex endeavor.\n\n## GitLab’s DevSecOps methodology assessment\n\nThere’s a lot to cover when standing up a DevSecOps process – so to help you\nmaster the key elements, we created a DevSecOps methodology assessment. Score\nyourself on 20 capabilities, and then use those scores to understand your DevSecOps\nmaturity level, and determine what actions your team can take to bring your DevSecOps to\nthe next level. 
[Download the assessment here.](https://about.gitlab.com/resources/devsecops-methodology-assessment/)\n\nCover image by [Tim Evans](https://unsplash.com/@tjevans) on [Unsplash](https://unsplash.com/photos/Uf-c4u1usFQ)\n{: .note}\n",[722,787,108,9],{"slug":2300,"featured":6,"template":699},"how-to-security-as-code","content:en-us:blog:how-to-security-as-code.yml","How To Security As Code","en-us/blog/how-to-security-as-code.yml","en-us/blog/how-to-security-as-code",{"_path":2306,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2307,"content":2313,"config":2319,"_id":2321,"_type":13,"title":2322,"_source":15,"_file":2323,"_stem":2324,"_extension":18},"/en-us/blog/how-to-setup-gitlab-for-multiple-product-teams",{"title":2308,"description":2309,"ogTitle":2308,"ogDescription":2309,"noIndex":6,"ogImage":2310,"ogUrl":2311,"ogSiteName":685,"ogType":686,"canonicalUrls":2311,"schema":2312},"Managing multiple product categories in GitLab","Exploring issue mangement options for product teams that are all contributing to a single repository.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680898/Blog/Hero%20Images/filing-cabinet.jpg","https://about.gitlab.com/blog/how-to-setup-gitlab-for-multiple-product-teams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Managing multiple product categories in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabe Weaver\"}],\n        \"datePublished\": \"2019-12-05\",\n      }",{"title":2308,"description":2309,"authors":2314,"heroImage":2310,"date":2316,"body":2317,"category":300,"tags":2318},[2315],"Gabe Weaver","2019-12-05","\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2019-12-30.\n{: .alert .alert-info .note}\n\nIn a [recent tweet](https://twitter.com/mrguillaum/status/1202530376415088641), a member of the wider GitLab community asked how to set up GitLab so multiple product teams, each with their own unique workflows, could contribute effectively to a single repository. Let's explore some options.\n\n## GitLab's approach\n\nGiven that GitLab is growing very quickly, we've had to put a lot of thought into organizational structure and mapping to create a well structured workflow within GitLab. The entirety of the product surface area is comprised of [seven distinct product sections](https://handbook.gitlab.com/handbook/product/categories/). Each section consists of several [stages](https://handbook.gitlab.com/handbook/product/categories/#hierarchy), and each stage contains several categories. Our cross-functional product teams are called groups and the groups typically own a handful of categories within a stage. For example, [I'm the senior product manager for the Project Management group](/company/team/#gweaver), which is responsible for the [issue tracking](https://docs.gitlab.com/ee/user/project/issues/), [Kanban boards](https://docs.gitlab.com/ee/user/project/issues/index.html#issue-boards), and [time tracking](https://docs.gitlab.com/ee/user/project/time_tracking.html#time-tracking) categories. There are two other groups within the Plan stage.\n\nWhile each product team can technically have their own workflow, we've deliberately tried to standardize across teams. We accomplish this primarily through leveraging group level issue boards and labels. I've created a [demo group](https://gitlab.com/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach) where you can see the our basic approach setup directly within GitLab. The first step to get up and running is to configure the labels. 
Within our top level group, we've created specific label sets to help us organize our issues, MRs, and issue boards.\n\n### Labels for managing ownership and surface area:\n\n- `stage::name` denotes which issues belong to a given stage. In the demo, I created the `stage::plan` label. This is especially useful for filtering issue boards. By using the [scoped label](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) syntax, we get mutually exclusive labels so an issue or MR can only ever be assigned to a single stage.\n- `group::name` denotes which issues belong to a given cross-functional product team. In the demo, I created `group::project management`, `group::portofolio management`, and `group::certify`, which are the actual groups within GitLab's Plan stage. Again, the use of scoped labels assures mutual exclusivity.\n- `category::name` denotes which issues belong to a given category within a stage. In the demo, I created `category::epics`, `category::issue boards`, `category::issue tracking`, `category::requirements management`, `category::roadmaps`, and `category::service desk`, which are the main categories within the Plan stage.\n\n### Labels for tracking workflow and issue types:\n\n- `type::name` denotes the type of issue. In the demo, I've created `type::debt`, `type::feature`, and `type::defect`. Given an issue can only ever be one type, the use of the scoped label syntax is best.\n- `workflow::state` denotes which workflow state an issue is in. In the demo, I've created `workflow::validation backlog`, `workflow::problem validation`, `workflow::solution validation`, `workflow::planning breakdown`, `workflow::scheduling`, `workflow::ready for dev`, `workflow::in dev`, `workflow::review`, and `workflow::verification`. 
You can design your workflow however you want, but it is helpful to have a [discussion](https://gitlab.com/gitlab-org/plan/issues/34) with your team to clarify transitions from one workflow state to another.\n\nWith our labels in place, we can now spin up some group level issue boards for the different product teams. The [Project Management team's issue board](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438121?&label_name[]=group%3A%3Aproject%20management&label_name[]=stage%3Aplan) uses a common naming convention and is scoped to only include issues that contain the `group::project management` and `stage::plan` labels. The lists are set up according to the `workflow::*` labels we defined earlier. The [Portfolio Management](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438186?&label_name[]=group%3A%3Aportfolio%20management&label_name[]=stage%3Aplan) and [Certify](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438188?&label_name[]=group%3A%3Acertify&label_name[]=stage%3Aplan) teams use a similar structure. The benefit of standardizing on a workflow is that it allows you to easily move from one team to the next and understand what's going on, as well as create rollup issue boards that cut across many teams. In the demo, I created a [stage level issue board](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/boards/1438190?&label_name[]=stage%3Aplan) for Plan. 
All of the issues belong to the same project that contains the single repository where all of the product teams contribute.\n\nThe last thing to cover is using milestones to align everyone around a shared release cadence. In the demo, I created [two group milestones](https://gitlab.com/groups/tech-marketing/demos/gitlab-agile-demo/demo-group/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/-/milestones) that all of the issues map to. This allows you to see progress at a high level as well as a breakdown of issue status by label type. We're currently working on allowing an issue to be associated to [multiple milestones](https://gitlab.com/gitlab-org/gitlab/issues/5135), enabling milestones to have [types](https://gitlab.com/gitlab-org/gitlab/issues/35290), and adding [burnup charts](https://gitlab.com/gitlab-org/gitlab/issues/6903) to milestones. Once these launch, teams will have even more flexibility to create shared milestones as well as team-specific milestones.\n\n## Other options\n\nThere are a few less desirable ways to setup GitLab to help coordinate multiple product teams:\n\n### The project approach\n\nWith [the project approach](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/project-approach), you can create a project for each individual team and disable the repo. Then create a shared repo where all the merge requests go. Each product team would then have its own project-level milestones, issue boards, and issues, but could still nicely tie into the shared repository. Here's an [example issue and MR](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/project-approach/portfolio-management-team/issues/1) demonstrating how this works. The downside of this approach is that you lose the \"Create Merge Request\" button that issues will have if the issue and repo are within the same project. 
The \"Create Merge Request\" button allows you to quickly bootstrap your work by [spinning up a branch and WIP MR](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/gitlab-approach/shared-project/merge_requests/1). You also lose the ability to accurately track [cycle analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) out-of-the-box because it is currently based on project level data and not group level data.\n\n### Single issue tracker project\n\nI've seen some teams use a [single issue tracker project](https://gitlab.com/examples-for-configuring-gitlab-for-multiple-product-teams/multiple-repos) with multiple repositories within the group. They use a similar labeling system described in GitLab's approach, but separate the concerns of code management from issue management. While this works, it has the same downsides as the project approach.\n\n## General best practices & conventions\n\nTo sum things up, there are some generally good practices to follow when setting up your organizational structure within GitLab:\n\n- Centralize label management within the top level group. This helps avoid label duplication and keeps all teams on the same page.\n- Manage issues via issue boards from the group level and keep issues within the same project as their repository to get the most benefit from GitLab's capabilities.\n- Create issue boards for different activities – use milestone lists for a release planning board, use scoped workflow labels for a sprint board, etc.\n- Create shared milestones within the top-level group. 
This allows them to cascade throughout all sub-groups and projects.\n- If you use epics, a maximum of three layers of nesting is recommended to avoid confusion and unneccessary complexity.\n- Use [GitLab triage](https://gitlab.com/gitlab-org/gitlab-triage) to create policies to help automate issue management.\n\nIf you want to talk shop or bounce around ideas, feel free to reach out via email – gweaver at gitlab dot com.\n\nCover image by [Maksym Kaharlytskyi](https://unsplash.com/@qwitka) on [Unsplash](https://unsplash.com/photos/Q9y3LRuuxmg) {: .note}\n",[974,9],{"slug":2320,"featured":6,"template":699},"how-to-setup-gitlab-for-multiple-product-teams","content:en-us:blog:how-to-setup-gitlab-for-multiple-product-teams.yml","How To Setup Gitlab For Multiple Product Teams","en-us/blog/how-to-setup-gitlab-for-multiple-product-teams.yml","en-us/blog/how-to-setup-gitlab-for-multiple-product-teams",{"_path":2326,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2327,"content":2333,"config":2338,"_id":2340,"_type":13,"title":2341,"_source":15,"_file":2342,"_stem":2343,"_extension":18},"/en-us/blog/how-to-shorten-conversation-cycle",{"title":2328,"description":2329,"ogTitle":2328,"ogDescription":2329,"noIndex":6,"ogImage":2330,"ogUrl":2331,"ogSiteName":685,"ogType":686,"canonicalUrls":2331,"schema":2332},"How to shorten the conversation cycle","Four simple steps to move faster from idea to production.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671350/Blog/Hero%20Images/shorten-conversation-cycle.jpg","https://about.gitlab.com/blog/how-to-shorten-conversation-cycle","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to shorten the conversation cycle\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-06-19\",\n      
}",{"title":2328,"description":2329,"authors":2334,"heroImage":2330,"date":2335,"body":2336,"category":811,"tags":2337},[1133],"2017-06-19","\n\nIf your new features often get stalled in the initial discussion phase, read our four tips for shortening the conversation cycle and shipping faster.\n\n\u003C!-- more -->\n\n## 1. Measure your cycle time\n\nThe first step towards making a change is having the numbers to motivate it. If you measure the duration of time from the moment an idea is first discussed in chat, all the way through to its release in production, you can make a good case for changing your approach if others can see that something is causing delays. Try a feature like [cycle analytics](/solutions/value-stream-management/) to monitor each stage in your workflow.\n\n## 2. Start with minimum viable changes\n\nYou've identified the problem, now how do you fix it? Where ideas for new features and improvements often get stuck is on how to implement them. The idea may be too ambitious or too time consuming to ship easily, so it gets pushed back in favor of more manageable changes. Try breaking up new products or features into smaller pieces of functionality. [Iteration is one of our company values](https://handbook.gitlab.com/handbook/values/) and while it's often one of the more uncomfortable ones, it is effective. Do the smallest thing possible and release it quickly – you can keep iterating from there.\n\n## 3. Include gatekeepers early on\n\nWho needs to approve something before you ship? Don't leave them out until the last minute. Including stakeholders, security experts, product managers and UX team members in the conversation in the early phases prevents bottlenecks ahead of release, and ensures that most errors have been caught and addressed before you move into production. Read more about [shipping faster without sacrificing security or quality](/blog/speed-security-quality-with-hackerone/).\n\n## 4. 
Get everyone on board\n\nAcknowledging that a feature or product is not polished and needs more work, yet releasing it anyway, feels unnatural to most of us, so you may meet some resistance to the idea. Working in this way does offer benefits to both business owners and developers, which you can communicate to help persuade hesitant team members.\n\nFor example, you can respond more quickly to market needs and user feedback by shipping minimum viable changes often, which is good news for your business. For developers, it's easier to troubleshoot a small release and having faster, more frequent feedback on work gives more of a sense of progress and boosts motivation.\n\nMoving towards smaller releases to shorten the time between idea and production may feel strange at first, but you'll start seeing results quickly.\nShortening the conversation cycle is just one principle of Conversational Development. Visit [conversationaldevelopment.com](http://conversationaldevelopment.com/) to learn more.\n{: .alert .alert-gitlab-orange}\n\n[Cover image](https://unsplash.com/@djmalecki?photo=fw7lR3ibfpU) by [Dawid Malecki](https://unsplash.com/@djmalecki) is licensed under [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/)\n{: .note}\n",[696,9],{"slug":2339,"featured":6,"template":699},"how-to-shorten-conversation-cycle","content:en-us:blog:how-to-shorten-conversation-cycle.yml","How To Shorten Conversation Cycle","en-us/blog/how-to-shorten-conversation-cycle.yml","en-us/blog/how-to-shorten-conversation-cycle",{"_path":2345,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2346,"content":2352,"config":2358,"_id":2360,"_type":13,"title":2361,"_source":15,"_file":2362,"_stem":2363,"_extension":18},"/en-us/blog/how-to-spot-development-issues",{"title":2347,"description":2348,"ogTitle":2347,"ogDescription":2348,"noIndex":6,"ogImage":2349,"ogUrl":2350,"ogSiteName":685,"ogType":686,"canonicalUrls":2350,"schema":2351},"How to spot development issues and fix them 
fast","Guest author Patrick Foster shares how to get things back on track when a development project starts to go awry.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680343/Blog/Hero%20Images/spot-dev-issues.jpg","https://about.gitlab.com/blog/how-to-spot-development-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to spot development issues and fix them fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Foster\"}],\n        \"datePublished\": \"2017-10-16\",\n      }",{"title":2347,"description":2348,"authors":2353,"heroImage":2349,"date":2355,"body":2356,"category":811,"tags":2357},[2354],"Patrick Foster","2017-10-16","\n\nDevelopment issues can be expensive to fix — and the later you uncover them,\nthe worse it is. If you’re running (or dependent on) a development project it’s\nreally important that you stay on the ball at all times. Communication,\ntransparency, and accountability are all essential. Here are some development\nproject red flags that you need to be aware of – as well as how you\ncan address things if it looks like they are starting to go wrong...\n\n\u003C!-- more -->\n\n## Track developer stress and coping strategies\n\n**Start with getting to know the team behind the work – learn how to track their\nstress levels accurately.**\n\nIf you are working with developers or managing a team of them, you will start to\nget a feel for their stress levels via your day-to-day interactions. If you know\nthem reasonably well, you will probably be able to recognize stress in the\nthings that they say, or in way that they act. ([Everyone handles stress differently](http://serendip.brynmawr.edu/biology/b103/f03/web1/skim.html),\ndepending on their personality and past experience).\n\nThe easiest way to diagnose developer stress? 
Over-complication of simple tasks.\nIf you find it’s taking you about 16 emails to discuss some simple edits to a\nsite menu, it’s probably a sign that something is wrong, and that undue pressure\nis being applied somewhere along the line. Once we enter stressful realms,\ncommunication tends to get very fraught and people will start to feel on edge\nall the time.\n\nIt’s important to address any workflow or workload issues that might be causing\nstress, but don’t forget that one of the biggest stressors is probably other\npeople. It could be that development personalities and job responsibilities are\nclashing in unproductive ways. Development teams need the right mix of people\nand skillsets in order to function harmoniously (just like any other team).\n[Does your team have all of these crucial attributes?](/blog/attributes-of-successful-development-teams/)\nIt could be that the team is out of balance somewhere, so spend some time with\npeople in order to improve team dynamics and implement coping strategies.\n\n## Stop little things from snowballing\n\nIf little tasks and small instructions are constantly getting ‘lost in briefing’\nit could signal that there is a missing chain in the communication workflow.\nSmall oversights can quickly build and create mountains of frustrations for the\nteam. Missing even the most minute detail can completely derail an otherwise\nsuccessful development project, so react immediately if you spot any oversights,\nno matter how small.\n\nFeatures like Issues and [Issue Boards](/stages-devops-lifecycle/issueboard/) can help everyone break down\ncomplex tasks into smaller individual ones and track their progress across the\ndevelopment lifecycle. Singular tasks are often be the best way for developers\nto tackle a subject – this is especially true for junior developers and\ntrainees. 
Overloading people with too many tasks at once will crash their\nbandwidth, so approach briefs in a very ordered manner.\n\n## Adopt ‘slow’ solutions (where needed)\n\nSometimes it’s not possible to whack a plaster on an issue and call it a day.\nSome solutions to development problems are just as complex as the problems\nthemselves, and you need to focus on proper, rather than fast, implementation.\nYou might need to factor in extra time or budget (gulp) in order to get a\ndevelopment project or team back on track. The effort you put into the (right)\nsolution will pay off in the long run.\n\nA good example of a ‘slow solution’ is the [steep learning curve developers face when adopting new tools and ways of working like Git](/blog/learning-curve-is-the-biggest-challenge-developers-face-with-git/)\n– but that shouldn’t put you off. It may be that right now your development team\nneed to spend some time getting to grips with a process, framework, or tool that\nwill save hundreds of development hours further down the line.\n\nYou may also want to take advantage of the [Minimum Viable Change Principle](/blog/how-to-shorten-conversation-cycle/) that\ntakes into account the full scale of development complexity, but focuses on\nmoving the project forwards with a minimum viable fix, allowing for further\niterations when the time is right. This is a great strategy that should be\nimplemented on a regular basis, especially when time is of the essence and a\nfull raft of features is not immediately feasible.\n\n## Focus on logic\n\nDevelopment is an extremely logical task, and you need to approach development\ntroubleshooting in the same logical and methodical way. Development problems\nneed to be fully mapped out in a logical sequence, not treated reactively with\n‘creative’ solutions.\n\nSpecificity is a really important thing when discussing potential development\nissues.  
Unclear and vague pronouns aren’t helpful – be ready to be super\nanalytical and direct.\n\nDevelopment projects are notorious for running over-budget and taking up loads\nof business time, which can cause logic to fly out of the window in a state of\npanic. Think carefully about any knee-jerk reactions, and don’t be so ready to\nburn a whole project because of a few final teething problems.\n\n## Review your team model\n\n**It all works better when you embrace the idea that “product,” “design,” and\n“engineering” are just different perspectives on the same thing.** – [Greg Veen](http://jrsinclair.com/articles/2017/faster-better-cheaper-art-of-making-software/#fn:3)\n\nSlow progress or projects stalling could come down to your software team model.\nWhen was the last time you reviewed yours? There are a few different software\nproject management methodologies that can really help structure and improve\npreviously ‘messy’ development teams. Have a look around you, and see whether\nit’s time your team went in for an upgrade?\n\nFrom [Scrum to Kanban](https://devops.com/kanban-vs-scrum/), there is an\nincreasing focus on [DevOps](/topics/devops/)\nas a way to have more joined-up development and software teams. A product\nengineering model can be a great way to improve company and project efficiency –\nit’s certainly [helped big software companies like Shopify refine their development strategy](https://engineering.shopify.com/blogs/engineering/why-shopify-moved-to-the-production-engineering-model).\n\n## Measure constantly for project agility\n\nIf you want to find problems, you need to be tracking them first! From\ncollecting the right data, to testing and tracking, make sure that you\nconstantly keeping tabs on the project as it progresses. 
Adopting tools like\n[Cycle Analytics](/solutions/value-stream-management/) will\nensure that you always stay on track with how your projects are progressing,\nand the data you’ll harness will become an invaluable source of business intelligence.\n\nOne great way to incentivize teams and fix any looming issues on the horizon is\nto make performance and progress visible, then discuss them openly.\nWhen a problem does surface, treat it as a single entity and don’t wait for any\nmore to pile up – this is a much more [agile approach](http://agilemethodology.org/)\nthat will help keep projects streamlined.\n\n## Build communication into the project\n\nShow, don’t tell. Development progress needs to be communicated in clear and\nvisual terms – language is often an insufficient medium for development (and\ndevelopers). Consistent bug reporting and watertight specifications are\nimportant. Specification quality needs to be at 100 percent, otherwise you can’t\nexpect the code you get back to be 100 percent either.\n\nIf development is rubbing up against teams with little development experience,\ncommunication becomes even more essential. Reducing the amount of jargon can\nhelp non-developers stay in the loop, but at the same time, it’s important that\na business learns how to adopt development and software language (especially if\nit relies on it for its income).\n\nIt’s disheartening to see how little some software company employees actually\nknow about software development – better communication can help rectify this issue.\n\n## The halfway point check-in\n\nThe halfway point is a critical yardstick for any development project. It’s a\ngreat time to check in with your team and see how they are getting on. 
By then,\nyou should have a pretty robust feel for how people are coping, and whether the\nproject is going to be delivered in time.\n\nHaving a formal process and meeting for the halfway point isn’t always feasible\n(it largely depends on project size), but it’s a good idea to do nevertheless.\n[Getting the team together and getting visibility on progress is also a morale booster](https://www.themuse.com/advice/7-great-ways-to-boost-your-teams-morale).\n\nIf things aren’t looking good halfway? Don’t just cross your fingers, and hope\nfor the best for the rest of the time – you need to tackle the issue there and\nthen. Go away and review all the data that’s available to you before you make\nany rash killswitch decisions.\n\n*In order to keep your development projects on track, you need to become good at\ncommunicating with your development team, embracing agile solutions wherever\npossible. What’s your number one project management tip?*\n\n### About the guest author\n\nPatrick Foster is an ecommerce consultant and coach, and has been helping\nfounders and ecommerce startups for longer than he cares to admit. A passionate\nadvocate of ecommerce journeys and stories, he is always looking to find\nlikeminded thinkers and entrepreneurs. 
Come say hello on [Twitter](https://twitter.com/myecommercetips).\n\n[Cover image](https://unsplash.com/photos/SITaCHf7jjg) by [Alexander Shustov](https://unsplash.com/@alexandershustov) on Unsplash\n{: .note}\n",[9],{"slug":2359,"featured":6,"template":699},"how-to-spot-development-issues","content:en-us:blog:how-to-spot-development-issues.yml","How To Spot Development Issues","en-us/blog/how-to-spot-development-issues.yml","en-us/blog/how-to-spot-development-issues",{"_path":2365,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2366,"content":2371,"config":2377,"_id":2379,"_type":13,"title":2380,"_source":15,"_file":2381,"_stem":2382,"_extension":18},"/en-us/blog/how-to-status-checks",{"title":2367,"description":2368,"ogTitle":2367,"ogDescription":2368,"noIndex":6,"ogImage":1086,"ogUrl":2369,"ogSiteName":685,"ogType":686,"canonicalUrls":2369,"schema":2370},"How to use external status checks for merge requests","Want to integrate third-party systems and apps with GitLab merge requests? 
Here's everything you need to know.","https://about.gitlab.com/blog/how-to-status-checks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use external status checks for merge requests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-10-04\",\n      }",{"title":2367,"description":2368,"authors":2372,"heroImage":1086,"date":2374,"body":2375,"category":832,"tags":2376},[2373],"Cesar Saavedra","2021-10-04","\n\nThe [external status checks for merge requests capability](/releases/2021/07/22/gitlab-14-1-released/#external-status-checks-for-merge-requests) was recently introduced in GitLab and it allows the integration of third-party systems and applications with GitLab merge requests.\n\n## What are \"external status checks for merge requests\"?\n\nExternal status checks are API calls to systems or applications that sit outside GitLab. These API calls are invoked during merge requests, which display a widget with the status of each external check. With external status checks, you can integrate GitLab with third-party systems, e.g. Salesforce, PeopleSoft, Microsoft Dynamics, etc., that require manual approval for merge requests. This makes it easy to see that merge requests have met external requirements before being merged, adding an extra method to ensure compliance and audit requirements are met.\n\n## Steps to enable and use external status checks for merge requests\n\nIn this example, I have a sample project called **my-proj**, for which I'd like to add and exercise a single external status check, which will hypothetically do some kind of validation for the merge request.\n\n### Adding an external status check to your project\n\nExternal status checks are added to merge requests by heading to your project’s **Settings > General** and then expanding the **Merge requests** section. 
Towards the bottom of the **Merge requests** section, you will see an **Add status check** button, which you will need to click to to display the **Add status check** pop-up dialog:\n\n\u003C!--\n![Add status check dialog](https://about.gitlab.com/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png){: .shadow.small.center.wrap-text}\nAdd status check dialog with filled values\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/1-add-status-check-dialog.png\" width=\"50%\" height=\"50%\">\nAdd status check dialog with filled values\n{: .note.text-center}\n\nIn the dialog above, the external service name is being given the name *compliance-check*. The external API that will be called is:\n\n> https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\n\n> **NOTE:** the *validate* service above was [a simple Java service that I set up](https://gitlab.com/tech-marketing/sandbox/cd/compvalidator) ahead of time to mimic a third-party external service. It returned an HTTP 200 success message when invoked. In a real life scenario, this external API call would be a SaaS service or an on-premises ERP system, for example.\n\nThe API above is a call - invoked from any merge requests created under this project - to an external system that will run a compliance check and validate modifications to this application.\n\nAs the target branch, the default *Any branch* has been selected. Another option could have been the *main* branch.\n\nWhen you click the **Add status check** button, an entry will be created in the **Status checks** table, as shown below:\n\n![status check table](https://about.gitlab.com/images/blogimages/how-to-status-checks/2-status-checks-table.png){: .shadow.small.center.wrap-text}\nStatus checks table\n{: .note.text-center}\n\n### External status check in action\n\nTo exercise the external status check for merge requests, we need to create a merge request. 
But before that, let's create an issue.\n\n1. Create an issue by clicking on **Issues > List** from the left vertical navigation menu to get to the Issues screen.\n\n2. Then click on the **New Issue** button\n\n3. On the **New Issue** window:\n\n3.1. In the Title field, enter \"External status check demo\"\n\n3.2. In the Description field, enter \"Issue to demonstrate an external status check\"\n\n3.3. Click on **Assign to me** next to the **Assignees** field\n\n3.4. Click on the **Create issue** button at the bottom of the window\n\n\u003C!--\n![issue create window](https://about.gitlab.com/images/blogimages/how-to-status-checks/3-issue-create-window.png){: .shadow.small.center.wrap-text}\nCreating an issue\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/3-issue-create-window.png\" width=\"75%\" height=\"75%\">\nCreating an issue\n{: .note.text-center}\n\nOnce the issue is created, you will be in the detail issue window.\n\n4. Click on the **Create merge request** button on the right hand side of the detailed issue window.\n\n![create a merge request](https://about.gitlab.com/images/blogimages/how-to-status-checks/4-create-merge-req.png){: .shadow.small.center.wrap-text}\nCreating a merge request\n{: .note.text-center}\n\nOnce the merge request is created, you will be in the detail merge request window.\n\n5. Click on the **Open in Web IDE** button on the right hand side of the detailed merge request window:\n\n![open webIDE](https://about.gitlab.com/images/blogimages/how-to-status-checks/5-open-webide.png){: .shadow.small.center.wrap-text}\nOpening the Web IDE\n{: .note.text-center}\n\n6. Make a minor update to the application. In the sample project **my-proj**, I modified two files: DemoApplication.java and DemoApplicationTests.java.\n\n6.1. 
In the DemoApplication.java class, I added the word \"today\" to the string returned by a call to this class:\n\n![update DemoApp](https://about.gitlab.com/images/blogimages/how-to-status-checks/6-update-demoapp.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplication.java\n{: .note.text-center}\n\n6.2. In the DemoApplicationTests.java class, which is a unit test for DemoApplication.java, I also added the word \"today\" to the string in the *assertThat()* invocation to match the value returned by a call to the DemoApplication.java class:\n\n![update DemoAppTests](https://about.gitlab.com/images/blogimages/how-to-status-checks/7-update-demoapptests.png){: .shadow.small.center.wrap-text}\nMaking a simple update to DemoApplicationTests.java\n{: .note.text-center}\n\n7. Click on the **Commit…** button at the bottom of the Web IDE window. And then ensure to select the feature branch for the merge request before clicking on the **Commit** button again:\n\n\u003C!--\n![committing to feature branch](https://about.gitlab.com/images/blogimages/how-to-status-checks/8-click-commit.png){: .shadow.small.center.wrap-text}\nCommitting to the feature branch\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/8-click-commit.png\" width=\"30%\" height=\"30%\">\nCommitting to the feature branch\n{: .note.text-center}\n\n8. Go back to the merge request detail window by clicking on the merge request number on the bottom margin of the window:\n\n\u003C!--\n![click on merge request link](https://about.gitlab.com/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png){: .shadow.small.center.wrap-text}\nClicking on merge request link at bottom of window\n{: .note.text-center}\n-->\n\n\u003Cimg src=\"/images/blogimages/how-to-status-checks/9-click-mr-at-bottom.png\" width=\"75%\" height=\"75%\">\nClicking on merge request link at bottom of window\n{: .note.text-center}\n\n9. 
On the detail merge request window, scroll down until you see a section titled **Status checks 1 pending**. This is the merge request widget that lists all external status checks associated with merge requests. Click on the **Expand** button on the right hand side of this section:\n\n![expanding status checks widget](https://about.gitlab.com/images/blogimages/how-to-status-checks/10-click-on-expand.png){: .shadow.small.center.wrap-text}\nExpanding the status checks widget in the merge request\n{: .note.text-center}\n\n10. In the expanded section, you will see an entry for the external status check you defined above, whose name is *compliance-check*. Notice that to the left of its name, there is a pause symbol indicating to the merge request stakeholders that the check is still in progress and has not communicated its approval to the merge request yet:\n\n![list of status checks](https://about.gitlab.com/images/blogimages/how-to-status-checks/11-status-checks-widget-expanded.png){: .shadow.small.center.wrap-text}\nList of external status checks\n{: .note.text-center}\n\n11. In a real life scenario, the pause symbol would change to a green checkmark when the external status check communicates to GitLab that the compliance validation is finished, i.e. the merge request has been approved by the external service:\n\n![status checks passed](https://about.gitlab.com/images/blogimages/how-to-status-checks/12-status-check-passed.png){: .shadow.small.center.wrap-text}\nStatus checks that have passed\n{: .note.text-center}\n\n### How does an external status check inform GitLab that it has approved the merge request\n\nUsing an external status check integrates GitLab merge requests to a home-grown or SaaS application, for example, by invoking an API of this external system. Once this external system does its compliance validation or check, then it needs to inform GitLab that it has approved the merge request. 
To do this, the external system API must make use of the [GitLab external status checks API](https://docs.gitlab.com/ee/api/status_checks.html) to communicate to GitLab that the MR is approved. This is a 2-step process:\n\n1. The first step is to get the ID of the external status check you need to approve. Here is an example of how to invoke the GitLab API to do this:\n\n> curl --request GET --header \"PRIVATE-TOKEN: \u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_checks\"\n\nAn example of what the command above will return follows:\n\n> [{\"id\":86,\"name\":\"compliance-check\",\"external_url\":\"https://tech-marketing-sandbox-cd-compvalidator.compliance.gitlabworkshops.io/validate\",\"status\":\"pending\"}]\n\nThe example return value above shows that the ID of the external status check that we’d like to approve is 86.\n\n> **NOTE:** Although I'm showing an example of how to invoke the GitLab API above using the *curl* command, the idea is that your external system API call would carry out any checks and validation and then it would assemble this message in a REST HTTP call back to GitLab to communicate its approval of the merge request.\n\n2. Once you have the ID of the external status check, you can then approve it by using the GitLab API. 
Here’s an example:\n\n> curl --request POST --header \"PRIVATE-TOKEN:\u003Creplace with your GitLab API token>\" \"https://gitlab.com/api/v4/projects/28933616/merge_requests/1/status_check_responses?sha=\u003Creplace with SHA at HEAD of the source branch>&external_status_check_id=86\"\n\nExecuting the REST API call above will approve the external status check on the GitLab merge request.\n\n```\nNOTE: to obtain the \u003CSHA at HEAD of the source branch>, here’s an example of the command you’d need to execute:\n\n$ git ls-remote https://gitlab.com/tech-marketing/sandbox/cd/my-proj.git\n\nThe URL in the preceding line is the URL to the git project for your merge request. And here’s an example of the output of the preceding command:\n\nad1eeee497c99466797a1155f514d3c0c2f0cc45\tHEAD\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/heads/1-external-status-check-demo\nad1eeee497c99466797a1155f514d3c0c2f0cc45\trefs/heads/master\n9e209c8d409a0867c1df4e0965aa675277176137\trefs/merge-requests/1/head\n```\n\nIn the output above, the SHA for the feature branch associated with the merge request is *9e209c8d409a0867c1df4e0965aa675277176137*\n\n## What we've learned\n\nGitLab recently introduced \"external status checks for merge requests,\" which are effectively API calls to systems/application that sit outside GitLab. 
As you could see, with external status checks for merge requests, we were able to integrate GitLab with a third-party system that required manual approval for a merge request, ensuring that your application updates meet compliance and audit requirements.\n\nFor a demo of this feature in action, watch the video below:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/v4iY8qMvFLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n",[232,9,722],{"slug":2378,"featured":6,"template":699},"how-to-status-checks","content:en-us:blog:how-to-status-checks.yml","How To Status Checks","en-us/blog/how-to-status-checks.yml","en-us/blog/how-to-status-checks",{"_path":2384,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2385,"content":2391,"config":2396,"_id":2398,"_type":13,"title":2399,"_source":15,"_file":2400,"_stem":2401,"_extension":18},"/en-us/blog/how-to-strengthen-agile-teams-with-tuckmans-model",{"title":2386,"description":2387,"ogTitle":2386,"ogDescription":2387,"noIndex":6,"ogImage":2388,"ogUrl":2389,"ogSiteName":685,"ogType":686,"canonicalUrls":2389,"schema":2390},"Strengthen your Agile teams with Tuckman's stages of group development","Learn how to build up your agile teams  teams after breaking down silos for further group development","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680924/Blog/Hero%20Images/tuckmansstages.jpg","https://about.gitlab.com/blog/how-to-strengthen-agile-teams-with-tuckmans-model","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Strengthen your Agile teams with Tuckman's stages of group development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-12-13\",\n      
}",{"title":2386,"description":2387,"authors":2392,"heroImage":2388,"date":2393,"body":2394,"category":300,"tags":2395},[852],"2019-12-13","\n\nThe silos between development and operations teams are a common source of friction and bottlenecks. When teams battle silos, cycle time increases and business value stalls. Recently, software leaders have learned how to break down silos through communication and collaboration, but learning how to rebuild teams is a greater challenge. How can teams come together when their traditional behaviors and relationships have changed?\n\n## The answer: Tuckman's stages of group development\n\nIn 1965, psychologist Bruce Tuckman [published a study on the developmental sequence in   small groups](http://web.mit.edu/curhan/www/docs/Articles/15341_Readings/Group_Dynamics/Tuckman_1965_Developmental_sequence_in_small_groups.pdf). His findings highlighted the importance of four stages of development - forming, storming, norming, and performing - in order for a group to ideate, collaborate, plan, and deliver. \n\n**In the forming stage**, groups identify challenges and goals. Team members orient themselves to acceptable interpersonal behaviors and test boundaries to guide their interactions. **In the storming stage**, team members build trust by sharing their thoughts, which oftentimes leads to conflict, and discover various working styles. **In the norming stage**, the group resolves their differences and begins building a stronger sense of community and closeness. Individuals understand that they have common goals and must work together to achieve them. **In the performing stage**, the team achieves goals, functions independently, and resolves conflicts. Team members support each other and are more flexible in their roles.\n\n## How to strengthen Agile teams\n\nWhen leaders break down silos, team members often feel adrift due to the sudden cultural shift. 
To prevent a dysfunctional culture in which individuals don’t trust and support each other, leaders must make group development a priority. Applying Tuckman’s four stages to [Agile team development](/solutions/agile-delivery/) can result in a stronger dynamic. \n\n### Forming\n\nWhen management forms an Agile team, considering strengths and skills is a necessary aspect of purposefully curating a team. Team members should complement each other but not mirror each other, since the goal of an Agile team is to have a cross-functional team in which various members bring their strengths to work together. \n\nAfter eliminating silos, leaders must model and identify the behavior they want the team to adopt. Team members will look to a leader, such as a Scrum Master, for guidance. It’s typical for individuals to focus solely on their work rather than view the team as a collective entity working towards a goal. When this happens, it’s up to the Scrum Master to help individuals develop a shared mentality. After each ideation or sprint, the Scrum Master should gather the team to conduct a retrospective to understand what went well, what went wrong, and how to improve during the next ideation. Team members can work together to identify goals, assisting in the development of a sense of community. \n\n### Storming\n\nOnce individuals begin to see each other as teammates, conflict can arise, since people feel more comfortable sharing their opinions. When rebuilding teams after eliminating silos, it’s natural for individuals to shift blame onto others, so the goal in this stage is to cultivate trust, communication, and collaboration. \n\nThe Scrum Master is responsible for helping teammates resolve conflict, manage tension, and coach behaviors. As a calming influence on the team, the Scrum Master can quickly resolve conflicts and help the team remain productive. 
By documenting decisions, committing to transparency and visibility, and collaborating to determine solutions, teams can create an open culture in which experimentation is embraced and shortcomings are viewed as learning opportunities. Team members should continue to feel safe dissenting and sharing thoughts, but the focus should be on continuous improvement and identifying solutions rather than placing blame. \n\n### Norming \n\nTransitioning from Storming to Norming can be a difficult endeavor for many Agile teams, but once the shift is made, the focus becomes empowerment and implementation. After learning how to resolve conflict in the previous stage, the team is now able to embrace differences and view challenges from multiple perspectives. \n\nRetrospectives should become a ritual that occurs after each sprint. When the team moves to Norming, the next retro should set aside time to plan for sustainable delivery. The Scrum Master and other leaders should provide feedback to team members, while teammates provide feedback on processes and workflows. At this point in the group’s development, the individuals see themselves as part of a team working towards shared goals. There is mutual trust and open communication, and the team works together as a cohesive unit.\n\n### Performing\n\nAt this stage, the team is highly motivated and interested in expanding their efforts. Leadership should assume a supporting role, since the team now functions autonomously with an emphasis on continuous learning. Because teams seek to improve, they’re able to identify bottlenecks, the potential for silos, and impediments to innovation. \n\nThe team is now fully formed and productive. Individuals collaborate and communicate well, and they have a strong sense of identity and vision. The Agile team consistently delivers and embraces change. \n\nAny time groups evolve or new leadership walks through the door, teams can feel insecure and relive one or more of these stages. 
By implementing these techniques with your team, you can support your team’s growth and development, helping them maintain a strong Agile methodology and culture.\n\nCover image by [Markus Spiske](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/vrbZVyX2k4I)\n{: .note}\n",[744,696,9],{"slug":2397,"featured":6,"template":699},"how-to-strengthen-agile-teams-with-tuckmans-model","content:en-us:blog:how-to-strengthen-agile-teams-with-tuckmans-model.yml","How To Strengthen Agile Teams With Tuckmans Model","en-us/blog/how-to-strengthen-agile-teams-with-tuckmans-model.yml","en-us/blog/how-to-strengthen-agile-teams-with-tuckmans-model",{"_path":2403,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2404,"content":2410,"config":2416,"_id":2418,"_type":13,"title":2419,"_source":15,"_file":2420,"_stem":2421,"_extension":18},"/en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo",{"title":2405,"description":2406,"ogTitle":2405,"ogDescription":2406,"noIndex":6,"ogImage":2407,"ogUrl":2408,"ogSiteName":685,"ogType":686,"canonicalUrls":2408,"schema":2409},"How we are closing the gap on replicating *everything* in GitLab Geo","Developing an internal framework to enable other teams to add Geo support for their features","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we are closing the gap on replicating *everything* in GitLab Geo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Kozono\"}],\n        \"datePublished\": \"2021-04-29\",\n      
}",{"title":2405,"description":2406,"authors":2411,"heroImage":2407,"date":2413,"body":2414,"category":1239,"tags":2415},[2412],"Michael Kozono","2021-04-29","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn early 2020, it took 3.5 months of solid work to implement replication of a new data type in Geo. One year later, support can be added within a month -- including development and all required reviews. How did we do it? First, let me introduce you to Geo.\n\n## What is Geo?\n\n[GitLab Geo](https://about.gitlab.comhttps://docs.gitlab.com/ee/administration/geo/index.html) is the solution for widely distributed development teams and for providing a warm-standby as part of a disaster recovery strategy. Geo replicates your GitLab instance to one or more local, read-only instances.\n\n## What are data types?\n\n[GitLab Geo was released in June 2016 with GitLab 8.9](https://about.gitlab.com/releases/2016/06/22/gitlab-8-9-released/#gitlab-geo-new-product) with the ability to replicate project repositories to a read-only secondary GitLab site. Developers located near secondary sites could fetch project repositories as quickly as if they were near the primary.\n\nBut what about wiki repositories? What about LFS objects or CI job artifacts? In GitLab, each of these things is represented by different Ruby classes, database tables, and storage configurations. 
In Geo, we call these data types.\n\n## Is it really that hard to copy data?\n\nWhen we say a new data type is supported by Geo, this is what we mean:\n\n* Backfill existing data to Geo secondary sites\n* As fast as possible, replicate new or updated data to Geo secondary sites\n* As fast as possible, replicate deletions to Geo secondary sites\n* Retry replication if it fails, for example due to a transient network failure\n* Eventually recover missing or inconsistent data, for example if Sidekiq jobs are lost, or if infrastructure fails\n* Exclude data according to [selective sync settings](https://docs.gitlab.com/ee/administration/geo/replication/configuration.html#selective-synchronization) on each Geo secondary site\n* Exclude remote stored data unless [Allow this secondary node to replicate content on Object Storage](https://docs.gitlab.com/ee/administration/geo/replication/object_storage.html#enabling-gitlab-managed-object-storage-replication) is enabled on a Geo secondary site\n* Verify data integrity against the primary data, after replication\n* Re-verify data integrity at regular intervals\n* Report metrics to Prometheus\n* Report metrics in the Admin UI\n* View replication and verification status of any individual record in the Admin UI\n* Replication and verification job concurrency is configurable in Admin UI\n* Retry replication if data mismatch is detected ([coming soon to all data types using the framework](https://gitlab.com/gitlab-org/gitlab/-/issues/301244))\n* Allow manual re-replication and re-verification in the Admin UI ([coming soon to all data types using the framework](https://gitlab.com/gitlab-org/gitlab/-/issues/216100))\n* And more\n\n## How to iterate yourself into a problem\n\n[Iteration is a core value](https://handbook.gitlab.com/handbook/values/#iteration) at GitLab. 
In the case of Geo, by [GitLab 12.3](https://about.gitlab.com/releases/2019/09/22/gitlab-12-3-released/#geo-natively-supports-docker-registry-replication) we had added replication support for the most important data types, for example:\n\n* Project Git repositories\n* Project wiki Git repositories\n* Issue/MR/Epic attachments\n* LFS objects\n* CI job artifacts\n* Container/Docker registry\n\nAnd we had added a slew of features around these data types. But suddenly it was clear we had a problem. **We were falling behind in the race to replicate and verify all of GitLab's data.**\n\n* A new data type was being added by other teams, every few months. It was painful to prioritize 3 months of development time only to add replication to one data type. And even if we caught up, the latest features would always be unsupported by Geo for 3 months.\n* Automatic verification of Project and Wiki repositories was implemented, but adding it to a single data type was going to take 3 months.\n* Maintenance and other new features were increasing in effort due to the amount of code duplication.\n* Our event architecture needed too much boilerplate and overhead to add new events\n\n## How to iterate yourself out of a problem\n\nJust because it's possible to iterate yourself into a problem doesn't mean iteration failed you. Yes, ideally we would have seen this coming earlier. But consider that fast and small iteration has likely saved many hours of upfront work on features that have been quickly validated, and have since been changed or removed. 
It's also possible to [DRY up](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) code too soon into bad abstractions, which can be painful to tear apart.\n\nBut we reached a point where everyone agreed that the most efficient way forward required consolidating existing code.\n\n### Do the design work\n\n[Fabian](https://gitlab.com/fzimmer), our esteemed product manager, [proposed an epic](https://gitlab.com/groups/gitlab-org/-/epics/2161):\n\n> to build a new geo replication and verification framework with the explicit goal of enabling teams across GitLab to add new data types in a way that supports geo replication out of the box\n\nMost of the logic listed above in [Is it really that hard to copy data?](#is-it-really-that-hard-to-copy-data) is exactly the same for all data types. An internal framework could be used to significantly reduce duplication, which could deliver huge benefits:\n\n* Bugs in the framework only have to be fixed once, increasing reliability and maintainability.\n* New features could be added to the framework for all data types at once, increasing velocity and consistency.\n* Implementation details would be better hidden. Changes outside the framework become safer and easier.\n\nThe proposal went further than making it easy for *ourselves* to add Geo support to new data types. The goal was to make it easy for *non-Geo engineers* to do so. To achieve this goal, the framework must be easy to use, easy to understand, and well-documented. Besides the usual benefits of reducing duplication, this higher standard would help:\n\n* Minimize the effort to implement Geo support of new features, whether it's done by a Geo engineer or not.\n* Minimize lag time to add Geo support. If it's easy to do, and anyone can do it, then it's easy to prioritize.\n* Increase awareness in other teams that new features may require Geo support.\n* Influence the planning of new features. There are ways to make it more difficult to add Geo support. 
This is much easier to avoid during initial planning.\n\nAs a first step, Fabian [proposed creating a proof of concept of a framework](https://gitlab.com/gitlab-org/gitlab/-/issues/35540) leveraging lessons learned and incorporating improvements we already wanted to make to the existing architecture. The issue stimulated lots of design discussion in the team, as well as multiple POCs riffing off one another.\n\nThe biggest change was the introduction of a `Replicator` class which could be subclassed for every data type. The subclasses would contain the vast majority of the specifics to each data type.\n\nIn order to further reduce duplication, we also introduced the concept of a `Replicator strategy`. Most data types in GitLab could be categorized as blobs (simple files) or Git repositories. Within these categories, there was relatively little logic that needed to be specific to each data type. So we could encapsulate the logic specific to these categories in strategies.\n\nAnother significant decision was to make the event system more flexible and lightweight. We wanted to be able to quickly implement new kinds of events for a `Replicator`. We decided to do this without rewriting the entire event processing layer, by packaging and transmitting `Replicator` events within a single, generic event leveraging the existing heavyweight event system. We could then leave the old system behind, and after migrating all data types to the framework, we could easily replace it.\n\nOnce a vision is chosen, it can be difficult to see how to get there with small iterations. But there are often many ways to go about it.\n\n### Code\n\n#### High-level approach\n\nAt a high-level, we could have achieved our goal by taking two data types that were already supported, DRYing up their code, and refactoring toward the desired architecture. 
This is a proven, safe, and effective method.\n\nBut to me it felt more palatable overall to deliver customer value along the way, by adding support for a brand-new data type while developing the reusable framework. We already had practice implementing many data types, so there was little risk that we would, for example, take too long or use suboptimal abstractions. So we decided to do this with [Package registry](https://docs.gitlab.com/ee/user/packages/).\n\n#### Lay the foundation\n\nOur POCs already answered the biggest open questions about the shape of the architecture. The next step was to get enough of a skeleton merged, as quickly as possible, so that we could unlock further parallel work. To ensure correctness, we aimed to get something working end-to-end. We decided to implement \"replication of newly created Package files\". Much was left out, for example:\n\n* Replication of changes. (Most Blob types, including Package files, are immutable anyway)\n* Replication of deletes\n* Backfill of existing files\n* Verification was left out entirely from the scope of the first epic, since we already knew replication alone provides most of the value to users.\n\nSince the work still required many specific design decisions, we decided to [pair program](https://en.wikipedia.org/wiki/Pair_programming). [Gabriel Mazetto](https://gitlab.com/brodock) and I used [Zoom](https://zoom.us/) and [Visual Studio Live Share](https://visualstudio.microsoft.com/services/live-share/), which worked well for us, though there are many options available. [See a recording of our first call](https://www.youtube.com/watch?v=2XedCiU634s).\n\n[The spike](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/23447) was merged and we thought ourselves safe under the feature flag. Looking back on this particular merge request, we did make a couple mistakes:\n\n1. An [autoloading bug was discovered](https://gitlab.com/gitlab-org/gitlab/-/issues/202044). 
The merge request was reverted, fixed, and remerged. Thanks to [CI](https://docs.gitlab.com/ee/ci/) and end-to-end QA tests using actual builds, the impact was limited.\n1. The size of the spike was unnecessarily large and difficult to review for a single merge request. As it grew, we should have used it as a \"reference\" merge request from which we could break out smaller merge requests. Since then, GitLab policies have further emphasized [smaller iterations](https://handbook.gitlab.com/handbook/product/product-principles/#iteration).\n\n#### Build on the foundation\n\nWith the skeleton of the framework in the main branch, we could implement multiple features without excessive conflicts or coordination. The feature flag was enabled on [GitLab's staging environment](https://about.gitlab.com/handbook/engineering/development/enablement/systems/geo/staging.html), and each additional slice of functionality was tested as it was merged. And new issues for bugs and missing features were opened.\n\nWe built up the [developer documentation](https://docs.gitlab.com/ee/development/geo/framework.html) as we went along. In particular, we documented specific instructions to implement a new data type, aimed at developers with no prior knowledge of Geo. These instructions have since been moved to issue templates. For example, [this is the template for adding support to a new Git repository type](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab/issue_templates/Geo%20Replicate%20a%20new%20Git%20repository%20type.md). This caught a lot of would-be pain points for users of the framework.\n\nFinally, we released [Geo supports replicating GitLab Package Registries in GitLab 13.2](https://about.gitlab.com/releases/2020/07/22/gitlab-13-2-released/#geo-supports-replicating-gitlab-package-registries)!\n\n## Reaping the benefits\n\nFollowing the release of Geo support for Package Registries, we added support for many new data types in quick succession. 
Automatic verification was added to the framework. This recently culminated in a non-Geo engineer implementing replication *and verification* for a new data type, within one month!\n\n* In GitLab 13.5, [Geo replicates external merge request diffs and Terraform state files](https://about.gitlab.com/releases/2020/10/22/gitlab-13-5-released/#geo-replicates-external-merge-request-diffs-and-terraform-state-files). These were added by Geo engineers who had been less involved in building the framework. Many refinements to the framework, and especially to the documentation, came out of this.\n* In GitLab 13.7, [Geo supports replicating Versioned Snippets](https://about.gitlab.com/releases/2020/12/22/gitlab-13-7-released/#geo-supports-replicating-versioned-snippets). This was also added by a Geo engineer, and it was the first Git repository type in the framework, so it required more work than adding new Blob types.\n* In GitLab 13.10:\n  * [Geo supports replicating Group wikis](https://about.gitlab.com/releases/2021/03/22/gitlab-13-10-released/#geo-supports-replicating-group-wikis) was implemented by a non-Geo engineer.\n  * [Geo verifies replicated package files](https://about.gitlab.com/releases/2021/03/22/gitlab-13-10-released/#geo-verifies-replicated-package-files). This was a big new feature in the framework, adding automatic verification to any data type that can be checksummed.\n* GitLab 13.11:\n  * [Geo supports Pipeline Artifacts](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#geo-supports-pipeline-artifacts) was implemented by a non-Geo engineer.\n  * [Geo verifies replicated Versioned Snippets](https://about.gitlab.com/releases/2021/04/22/gitlab-13-11-released/#geo-verifies-replicated-versioned-snippets).\n* GitLab 13.12:\n  * [An already supported data type, LFS objects, is migrated to the framework under feature flag](https://gitlab.com/gitlab-org/gitlab/-/issues/276696). 
Following this will be the migration of \"Uploads\" and \"CI Job artifacts\", and then **deleting thousands of lines of code**. This should improve both reliability and velocity, for example, verification will be added to these data types.\n\nIn aggregate:\n\n* In GitLab 12.9, we replicated ~56% of all data types (13 out of 23 in total) and verified ~22%.\n* In GitLab 13.11, we replicate ~86% of all data types (25 out of 29 in total) and verify ~45%.\n* **In the last year, GitLab released six new features that needed Geo support. We replicate 100% of those new features and verify ~57%.**\n\n## What did it cost?\n\nFor comparison, it took around 3.5 months to [implement replication of Design repositories](https://gitlab.com/groups/gitlab-org/-/epics/1633). It took around 6 months to [implement the framework for replication of Package files](https://gitlab.com/groups/gitlab-org/-/epics/2346). So the cost to produce the framework for replication was roughly 2.5 months of work.\n\nWe don't really have a comparable for [implementation of verification](https://gitlab.com/groups/gitlab-org/-/epics/1817), but it looked like it would take about 3 months to implement for a single data type, while it took about 4 months total to implement for Package files and simultaneously add to the framework, for a cost of about 1 month.\n\nGiven that new data types now take about 1 month to implement replication *and verification*, the work to produce the framework **paid for itself with the implementation of a single data type**. All the rest of the benefits and time saved are more icing on the cake.\n\nMy only regret is that we should have done it sooner. 
I intend to be more cognizant of this kind of opportunity in the future.\n\n## What to expect in the future\n\n* [Already supported data types will be migrated into the framework](https://gitlab.com/groups/gitlab-org/-/epics/3588)\n* New features will be added more quickly, for example, verification will be rolled out for all [Blob](https://gitlab.com/groups/gitlab-org/-/epics/5285) and [Git repository](https://gitlab.com/groups/gitlab-org/-/epics/5286) data types\n* Duplication will be further reduced, for example, by [leveraging Rails generators](https://gitlab.com/gitlab-org/gitlab/-/issues/326842)\n\nHuge thanks to everyone who contributed to closing the gap on replicating *everything* in Geo!\n",[744,696,1323,834,790,1241,9],{"slug":2417,"featured":6,"template":699},"how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo","content:en-us:blog:how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo.yml","How We Are Closing The Gap On Replicating Everything In Gitlab Geo","en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo.yml","en-us/blog/how-we-are-closing-the-gap-on-replicating-everything-in-gitlab-geo",{"_path":2423,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2424,"content":2430,"config":2436,"_id":2438,"_type":13,"title":2439,"_source":15,"_file":2440,"_stem":2441,"_extension":18},"/en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management",{"title":2425,"description":2426,"ogTitle":2425,"ogDescription":2426,"noIndex":6,"ogImage":2427,"ogUrl":2428,"ogSiteName":685,"ogType":686,"canonicalUrls":2428,"schema":2429},"How we reduced MR review time with Value Stream Management ","The GitLab engineering team leverages VSM to pinpoint bottlenecks in the merge request review process and streamline software delivery. 
See how we do it and what we've learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097876/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097875817.png","https://about.gitlab.com/blog/how-we-reduced-mr-review-time-with-value-stream-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we reduced MR review time with Value Stream Management \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2025-02-20\",\n      }",{"title":2425,"description":2426,"authors":2431,"heroImage":2427,"date":2432,"body":2433,"category":832,"tags":2434},[1660],"2025-02-20","At GitLab, we're passionate about using our own products internally, a.k.a. dogfooding. Dogfooding has led to significant improvements in accelerating our software delivery cycle time for customers. This article spotlights a specific use case where [GitLab Value Stream Management (VSM)](https://about.gitlab.com/solutions/value-stream-management/) has driven significant improvements for our engineering team. You'll learn how VSM helped us tackle two critical challenges: measuring the journey from idea conception to merge request completion, and streamlining our deployment workflows.\n\n## The Challenge: Identifying bottlenecks in MR reviews\n\nDespite having well-defined workflows, one team noticed that MRs were taking longer than expected to be reviewed and merged. The challenge wasn’t just about the delays themselves, but about understanding *where* in the review process these delays were happening and *why*.\n\nOur team’s goal was clear:\n\n- Identify where time was being spent from the initial idea to the final merge of an MR.  \n- Pinpoint specific bottlenecks in the review process.  
\n- Understand how MR size, complexity, or documentation quality affect review time.\n\n## The Approach: Measures the MR review time in GitLab Value Stream Analytics\n\nValue Stream Analytics (VSA) enables organizations to map their entire workflow from idea to delivery, distinguishing between value-adding activities (VA) and non-value-adding activities (NVA) in the process flow. By calculating the ratio of value-added time to total lead time, the team can identify wasteful activities resulting in delays in MR reviews.\n\nTo obtain the necessary metrics, the team customized GitLab VSA to gain better visibility into our MR review process.\n\n### 1. Setting up a custom stage for MR review\n\nThe team added a [new custom stage](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) in VSA called **Review Time to Merge** to specifically track the time from when a reviewer was first assigned to when the MR was merged.\n\n* Start event: MR first reviewer assigned  \n* End event: MR merged\n\nBy defining this stage, VSA began measuring the duration of the MR review process, giving us precise data on where time was being spent.\n\n![Defining stage of VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097883929.png)\n\n### 2. Using the Total Time Chart for clarity\n\nWith the custom stage in place, the team used the [**Total Time Chart** on the VSA Overview page](https://about.gitlab.com/blog/value-stream-total-time-chart/) (**Analyze > Value Stream**) to visualize how much time was spent during the new MR Review stage. 
By comparing the values represented by each area on the chart, the team could quickly identify how this stage contributed to the total software delivery lifecycle (SDLC) time.\n\n![total time chart for VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097883930.png)\n\n### 3. Drilling down for deeper insights\n\nTo investigate specific delays, the team used the **Stage Navigation Bar** to dive deeper into the MR Review stage. This view allowed them to:\n\n- Sort MRs by review time: The stage table showed all related MRs, sorted by review duration, making it easy to detect slow MRs.  \n- Analyze individual MRs: For each MR, that team could examine factors such as reviewer assignment delays, multiple rounds of feedback, idle time after approval, and MR size/complexity.\n\n## The outcome: Actionable insights and improvements\n\nBy customizing VSA to track [MR review time](https://docs.gitlab.com/user/project/merge_requests/reviews/), the team uncovered several key insights:\n\n- **Delays in reviewer assignment:** Some MRs experienced delays because reviewers were assigned late, or reviewers had too many MRs in their queue.  \n- **Slow review start times:** Even after assignment, certain MRs sat idle before reviews began, often due to context switching or competing priorities.  \n- **Multiple feedback loops:** Larger MRs often required multiple rounds of feedback, which extended review time significantly.  \n- **Idle time post-approval:** Some MRs were approved but not merged promptly, often due to deployment coordination issues.\n\nFor the engineering manager on the team, VSA proved to be transformational/valuable in managing their team's workflow: *\"I've used the VSA to justify where we were spending time in MR completion. 
We have VSA customized to our needs, and it's been very beneficial to our investigations for opportunities for improvements.”* \n\nAlso, from this dogfooding experience, we’re now developing a key enhancement to improve visibility into the review process. We're adding a new event to VSA — [Merge request last approved at](https://gitlab.com/gitlab-org/gitlab/-/issues/503754) — which creates a stage that breaks down MR review steps even further for granular visibility.\n\n## The power of data-driven decisions\n\nBy leveraging GitLab’s VSA, we didn’t just identify bottlenecks – we gained actionable insights that led to measurable improvements in MR review time and overall developer productivity. We optimized merge request review cycles and increased developer throughput, validating our commitment to continuous improvement through measurement.\n\n> Want to learn more about how VSA can help your team? [Start a free  trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), customize your value streams, and see how you can make improvements throughout the SDLC for your teams. 
Then, make sure to [share your feedback and experiences in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/520962).\n\n## Read more\n\n- [Optimize value stream efficiency to do more with less, faster](https://about.gitlab.com/the-source/platform/optimize-value-stream-efficiency-to-do-more-with-less-faster/)\n- [New Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)\n- [Value stream analytics documentation](https://docs.gitlab.com/user/group/value_stream_analytics/)\n- [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n",[767,834,495,9,2435],"solutions architecture",{"slug":2437,"featured":6,"template":699},"how-we-reduced-mr-review-time-with-value-stream-management","content:en-us:blog:how-we-reduced-mr-review-time-with-value-stream-management.yml","How We Reduced Mr Review Time With Value Stream Management","en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management.yml","en-us/blog/how-we-reduced-mr-review-time-with-value-stream-management",{"_path":2443,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2444,"content":2450,"config":2455,"_id":2457,"_type":13,"title":2458,"_source":15,"_file":2459,"_stem":2460,"_extension":18},"/en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"title":2445,"description":2446,"ogTitle":2445,"ogDescription":2446,"noIndex":6,"ogImage":2447,"ogUrl":2448,"ogSiteName":685,"ogType":686,"canonicalUrls":2448,"schema":2449},"How we use GitLab at the Province of Nova Scotia","The Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and CI/CD. 
Here's how we started exploring DevOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670226/Blog/Hero%20Images/how-we-use-gitlab-at-nova-scotia.jpg","https://about.gitlab.com/blog/how-we-use-gitlab-at-the-province-of-nova-scotia","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab at the Province of Nova Scotia\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steven Zinck\"},{\"@type\":\"Person\",\"name\":\"Paul Badcock\"}],\n        \"datePublished\": \"2017-07-18\",\n      }",{"title":2445,"description":2446,"authors":2451,"heroImage":2447,"date":2452,"body":2453,"category":832,"tags":2454},[1402,1403],"2017-07-18","\n\nIn 2015 the Unix operations team at the Province of Nova Scotia decided to implement GitLab for source control and [Continuous Integration and Continuous Deployment](/solutions/continuous-integration/). This was the beginning of our foray into DevOps practices. This article describes our automated testing, integration and release of Puppet code.\n\n\u003C!-- more -->\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/devops-infinity-graphic.png){: .shadow}\u003Cbr>\n\nYou can also learn more about our DevOps transformation by watching our recent interview:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/SHdeqznJXbc\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n### Source control\n\nA source control management (SCM) system allows the user to “commit” code, documentation and other system artifacts such as configuration files to a central location. Each change results in a new version of the file, and previous versions of the file remain available on the SCM. 
Restoring a previous version is quick and easy.\n\nWe needed a way for multiple sysadmins to be able to work on code without colliding with one another. We also needed a way to vet changes through a peer review process. GitLab makes this easy thanks to its support of branching and merge requests. Branching allows a sysadmin to create an individual copy of the production code (“master”) and work with it in isolation — this allows multiple team members to be working on the same production code base without being concerned about conflicts between their work.\n\n### Continuous integration\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-cd-workflow.png){: .shadow}\u003Cbr>\n\nAs we built out more of our infrastructure with Puppet, we needed an automated way of testing our code. Over time, our test strategy has evolved to include automated [syntax checking](https://puppet.com/blog/verifying-puppet-checking-syntax-and-writing-automated-tests), [linting](http://puppet-lint.com/), [unit](https://puppet.com/blog/unit-testing-rspec-puppet-for-beginners) and [integration](http://serverspec.org/) tests. Manual testing was not sufficient, as it was often forgotten about and was very time consuming. Automated testing solved that — for every code commit, the test pipeline is executed. A complete test cycle currently takes under five minutes.\n\nOn each code commit to a branch other than master, the following test pipeline is kicked off by GitLab CI:\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/ci-screenshot.png){: .shadow}\u003Cbr>\n\nIf at any point a job fails, the pipeline stops and the sysadmin is notified. One of the great features of GitLab CI is its tight integration with Docker — each of the jobs above is run inside its own isolated container. 
The syntax-lint-spec job verifies that the Puppet syntax is good; linting confirms the code conforms to best practices; and spec confirms that logically the code functions as designed.\n\nThe test-kitchen jobs are a full suite of [ServerSpec](http://serverspec.org/) tests. We automatically provision four containers that represent our four most common configurations. Our Puppet code is applied to each container to verify that it will work in our production environment. This acts as a full regression test each time a code commit is made, and ensures that there were no unintended problems introduced. It gives us confidence that the code is actually doing what it’s intended to do.\n\n### Continuous deployment\n\nOnce all of the tests pass, the sysadmin can submit a merge request for their branch, and it will be reviewed by a senior staff member before reaching production. This is an important part of our workflow, because it gives junior staff the confidence that a more senior member of the team will review and approve a change before it reaches any of our servers. If the merge request is accepted, the branch will be merged into master and at that point GitLab CI will push the code to our Red Hat Satellite and Puppet Enterprise servers where it will be deployed to our environment.\n\n![Image via Steve Zinck and Paul Badcock](https://about.gitlab.com/images/blogimages/nova-scotia-devops/cd-screenshot.jpeg){: .shadow}\u003Cbr>\n\nYou can find the configuration files (Dockerfiles, .kitchen.yml, .gitlab-ci.yml and Satellite push script) at our [GitHub](https://github.com/nsgov).\n\nThe implementation of our system automation strategy and the toolset we selected has proven itself many times. 
We are spending less time fighting fires due to the streamlined and tested nature of our deployments and have earned the confidence of our clients.\n\n### The road ahead\n\nIn upcoming articles, we’ll write about the CI/CD process we built with [Communications Nova Scotia](https://novascotia.ca/cns/) that allows their development team to deploy and roll back their Dockerized application environment on demand. We also plan to write about our automated test strategy for Red Hat Ansible.\n\nThis post originally appeared on [*Medium*](https://medium.com/@szinck/how-we-use-gitlab-at-the-province-of-nova-scotia-708b514cc47f).\n\n## About the Guest Authors\n\n[Steve Zinck](https://www.linkedin.com/in/stevezinck/) spent most of his career working in the Public Service as a Unix and Infrastructure administrator. Over the past few years, he's started to transition away from traditional systems administration and begun to focus on software delivery and automation. As part of that transition, his team has implemented GitLab at the core of our automation and software delivery stack. His current focus is working with software and application teams to assist in streamlining their deployment and delivery process.\n\n[Paul Badcock](https://www.linkedin.com/in/pbadcock/?ppe=1) started working in the IT sector in 1998 with positions in small startups, to large fortune 500 companies, to currently on a public-sector team. His career was focused as a traditional IT Linux administrator until in the mid-2000s he started focusing on adopting development tooling, practices and methodologies for operational teams. This work culminated in implementing an early 2010s DevOps workplace framework with the help of @stewbawka and subsequently working with like-minded teams since. 
As a part of adopting developer tools he has previously worked with and managed CVS, SVN installations and various vendor products before reading a “Show HN” posting on Hacker News about GitLab.\n",[722,108,9],{"slug":2456,"featured":6,"template":699},"how-we-use-gitlab-at-the-province-of-nova-scotia","content:en-us:blog:how-we-use-gitlab-at-the-province-of-nova-scotia.yml","How We Use Gitlab At The Province Of Nova Scotia","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia.yml","en-us/blog/how-we-use-gitlab-at-the-province-of-nova-scotia",{"_path":2462,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2463,"content":2469,"config":2475,"_id":2477,"_type":13,"title":2478,"_source":15,"_file":2479,"_stem":2480,"_extension":18},"/en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"title":2464,"description":2465,"ogTitle":2464,"ogDescription":2465,"noIndex":6,"ogImage":2466,"ogUrl":2467,"ogSiteName":685,"ogType":686,"canonicalUrls":2467,"schema":2468},"How we use GitLab to automate our monthly retrospectives","How one engineering team is using GitLab CI to automate asynchronous retrospectives, making collaboration across four continents a breeze.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670529/Blog/Hero%20Images/automate-retrospectives.jpg","https://about.gitlab.com/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use GitLab to automate our monthly retrospectives\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2019-03-07\",\n      }",{"title":2464,"description":2465,"authors":2470,"heroImage":2466,"date":2472,"body":2473,"category":832,"tags":2474},[2471],"Sean McGivern","2019-03-07","\n\nAs an [Engineering\nManager] at GitLab I spend most of\nmy working day using GitLab for a variety of tasks – from 
using [issue boards](/stages-devops-lifecycle/issueboard/) for team assignments, [epics](https://docs.gitlab.com/ee/user/group/epics/) for tracking longer-term initiatives, and [todos](https://docs.gitlab.com/ee/user/todos.html) and notifications to manage my own workflow.\n\nWe also use GitLab in a number of unconventional ways, so I wanted to share with you one interesting use case we've been experimenting with.\n\n[Engineering Manager]: /handbook/engineering/management/\n\n## GitLab stage group retrospectives\n\nEach [stage group](/stages-devops-lifecycle/) at GitLab has its [own retrospective], which then feeds into the\n[GitLab-wide retrospective] we have for each monthly release.\n\n[own retrospective]: /handbook/engineering/management/group-retrospectives/\n[GitLab-wide retrospective]: /handbook/engineering/workflow/#retrospective\n\nThe [Plan team](/handbook/engineering/development/dev/plan/) is fairly widely\ndistributed: we have people on four continents, and only two members of the team\nare even in the same country as each other. We wanted to try [asynchronous\ncommunication] wherever possible, so we used GitLab issues for [our\nretrospectives], too.\n\nA quick note on terminology: we say [team] to refer to a manager – like me – and\ntheir reports. We say [stage group] to refer to the people who work on a\nparticular [DevOps stage], even across multiple teams. 
The Plan stage group is\neven more widely distributed.\n{: .note}\n\n[team]: /company/team/structure/#team-and-team-members\n[stage group]: /company/team/structure/#stage-groups\n[DevOps stage]: /handbook/product/categories/#devops-stages\n[asynchronous communication]: /handbook/communication#internal-communication\n[our retrospectives]: https://gitlab.com/gl-retrospectives/plan/issues?label_name[]=retrospective\n\n## Automating retrospective issue creation\n\nCreating the retrospective issue was fast, but adding links to notable\nissues that we shipped or that slipped was time consuming and\ntedious. In the spirit of [xkcd 1319], I decided to automate it, so I\ncreated the [async-retrospectives] project. This project makes\nretrospective issue creation a hands-off process:\n\n[xkcd 1319]: https://xkcd.com/1319/\n[async-retrospectives]: https://gitlab.com/gitlab-org/async-retrospectives\n\n1. It uses [scheduled pipelines] to create an issue on the 1st of each\n   month. As our [development month] runs from the 8th to the 7th, this\n   is a little early, but it allows the team to jot down any thoughts\n   they have while they are still working on the release.\n\n   ![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/scheduled-pipelines.png){: .shadow}\n2. The issue is created using the standard [GitLab API], using a [protected\n   variable] to hold the credentials.\n3. When we create the issue, we use [quick actions] to add the correct\n   labels and due date in a convenient way. (This is also possible\n   without quick actions, but quick actions are more convenient for me\n   personally.)\n4. 
Another scheduled pipeline runs on the 9th of each month to update\n   the existing issue's description with the lists of issues (slipped,\n   shipped) I mentioned above.\n\n   We make our retrospectives public after we conclude them, so you can see this\n   in action on the [11.8 Plan retrospective]:\n\n   [![](https://about.gitlab.com/images/blogimages/how-we-used-gitlab-to-automate-our-monthly-retrospectives/11-8-plan-retrospective.png){: .shadow}][11.8 Plan retrospective]\n\n[scheduled pipelines]: https://docs.gitlab.com/ee/ci/pipelines/schedules.html\n[development month]: /handbook/engineering/workflow/#product-development-timeline\n[GitLab API]: https://docs.gitlab.com/ee/api/\n[protected variable]: https://docs.gitlab.com/ee/ci/variables/#protected-variables\n[quick actions]: https://docs.gitlab.com/ee/user/project/quick_actions.html\n[11.8 Plan retrospective]: https://gitlab.com/gl-retrospectives/plan/issues/22\n\nI only intended this for use in Plan, but a nice thing about a company where we\n[give agency] to people to solve their problems is that people like me are able\nto try out things that might not work globally, like this.\n\nAs it happened, it's also been [picked up by other teams and groups]. We\nconfigure the creation in a [YAML file], just like GitLab CI is configured, to\ntry to make it as easy as possible for other managers to contribute and set this\nup for their team.\n\n[give agency]: https://handbook.gitlab.com/handbook/values/#give-agency\n[picked up by other teams and groups]: https://gitlab.com/gitlab-org/async-retrospectives/merge_requests?state=merged\n[YAML file]: https://gitlab.com/gitlab-org/async-retrospectives/blob/master/teams.yml\n\n## Our experience running asynchronous retrospectives\n\n### What works\n\nWe've had a lot of positive experiences from these asynchronous\nretrospectives. In particular:\n\n1. No one is disadvantaged because of their time zone. 
If we had a video call\n   with our time zone spread, we'd have some people on that call in the middle of\n   their night, or missing out completely.\n2. Because they are written down from the start, and because comments in GitLab\n   are linkable, we can very easily refer to specific points in the future.\n3. Also, because they are written down, the comments can include links to\n   specific issues and merge requests to help other people get the same context.\n\n### What needs improvement\n\nAsynchronous retrospectives aren't perfect, of course. Some of the downsides\nwe've noticed are:\n\n1. Video calls are simply better for some things. In particular, the discussion\n   does not flow as smoothly in text as it can in a verbal conversation.\n\n   We also conduct our [engineering-wide retrospective] in a [public video\n   call], so we retain some opportunity for synchronous discussion.\n2. Similarly, team bonding is slower in text than in video calls.\n3. Participation can be lower if it's something you don't have to do right now,\n   but can always defer to a later date. We are continually [looking for ways to improve\n   this].\n\nOver all, we don't intend to go back to video calls for retrospectives,\nand we're really happy with the results. 
You can see all public\nretrospectives from the teams and groups at GitLab in the [GitLab\nretrospectives group on GitLab.com].\n\n[engineering-wide retrospective]: https://docs.google.com/document/d/1nEkM_7Dj4bT21GJy0Ut3By76FZqCfLBmFQNVThmW2TY/edit\n[public video call]: /2017/02/14/our-retrospective-and-kickoff-are-public/\n[looking for ways to improve this]: https://gitlab.com/gitlab-org/async-retrospectives/issues/12\n[GitLab retrospectives group on GitLab.com]: https://gitlab.com/gl-retrospectives\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[108,696,790,9],{"slug":2476,"featured":6,"template":699},"how-we-used-gitlab-to-automate-our-monthly-retrospectives","content:en-us:blog:how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","How We Used Gitlab To Automate Our Monthly Retrospectives","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives.yml","en-us/blog/how-we-used-gitlab-to-automate-our-monthly-retrospectives",{"_path":2482,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2483,"content":2489,"config":2496,"_id":2498,"_type":13,"title":2499,"_source":15,"_file":2500,"_stem":2501,"_extension":18},"/en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"title":2484,"description":2485,"ogTitle":2484,"ogDescription":2485,"noIndex":6,"ogImage":2486,"ogUrl":2487,"ogSiteName":685,"ogType":686,"canonicalUrls":2487,"schema":2488},"10 tips to make you a productive GitLab user","Learn how quick actions can make you a more efficient GitLab user.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666717/Blog/Hero%20Images/cover-image.jpg","https://about.gitlab.com/blog/improve-your-gitlab-productivity-with-these-10-tips","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 tips to make you a productive GitLab user\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Roman Kuba\"}],\n        \"datePublished\": \"2021-02-18\",\n      }",{"title":2484,"description":2485,"authors":2490,"heroImage":2486,"date":2493,"body":2494,"category":832,"tags":2495},[2491,2492],"Michael Friedrich","Roman Kuba","2021-02-18","Most people know GitLab is a solid tool in today's DevOps workflows, with\ncode reviews, CI/CD, and project management all available for users in a\nsingle application. But there are always ways to be more efficient. Since we\nuse GitLab to develop GitLab, everyone has their own habits and hidden gems\nto speed things up.\n\n\nWe chatted about GitLab efficiency tips after seeing new [quick actions\nreleases in GitLab\n13.8](/releases/2021/01/22/gitlab-13-8-released/#display-all-available-quick-actions-in-autocomplete),\nand decided to share some of our favorite tips with GitLab users. We share\nour typical day-to-day workflows as an engineering manager (Roman) and a\ndeveloper (Michael) to show how quick actions make teams more productive and\nefficient.\n\n\n### Roman: Engineering manager starts planning\n\n\nI am an engineering manager on the [Create: Editor\nteam](https://handbook.gitlab.com/handbook/product/categories/features/#createeditor-group) at GitLab.\nOne of my responsibilities is capacity planning with product managers.\nPlanning happens every month for the next [GitLab release](/releases/).\nGitLab uses the [milestone\nfeature](https://docs.gitlab.com/ee/user/project/milestones/) to keep\neverything organized for the release. As planning goes on, I need to create\na new issue for a new feature in the Web IDE. 
The issue description uses a\n[description\ntemplate](https://docs.gitlab.com/ee/user/project/description_templates.html)\nwhich gets filled with the right context.\n\n\nBut instead of searching for the assignee in the dropdown, I just add a new\nline:\n\n\n```\n\n/assign @dnsmichi\n\n```\n\n\nAll quick actions start with a `/` character and will be interpreted by\nGitLab when the issue gets created. In addition to an assignee, issue labels\nneed to be applied as well.\n\n\n```\n\n/label ~\"type::feature\"\n\n```\n\n\nYou can also assign multiple labels at once:\n\n\n```\n\n/label ~devops::create ~group::editor ~\"Category::Web IDE\"\n\n```\n\n\n![GitLab Quick Actions: Multiple\nlabels](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_label_multiple.png)\n\nHow to apply multiple labels using GitLab quick actions.\n\n\nThe issue needs to be assigned to the next milestone. This can be done with\nanother quick action:\n\n\n```\n\n/milestone %13.10\n\n```\n\n\nNote that 13.9 release planning already happened last month. The [product\nkickoff](/direction/kickoff/) highlights the planned features.\n\n\nThe keyboard shortcut `cmd + enter` now creates the issue without clicking a\nbutton.\n\n\nSo far, we were able to complete a lot of the necessary workflows around\nissues in one go, and without ever leaving the text box.\n\n\nAfter reviewing the issue I created, I remembered that this issue should be\nassigned to the `FY22Q1 Performance OKRs` epic. Again, we can use a quick\naction. It’s important to note here that referencing an epic works with the\n`&` character. 
When we type this character, we can start to search for the\nepic by typing its name.\n\n\n```\n\n/epic & \u003Csearch>\n\n```\n\n\nThis will turn into something like this:\n\n\n```\n\n/epic &123\n\n```\n\n\nAll quick actions can be used in a new comment and again using `cmd + enter`\nto save it.\n\n\nThe `FY22Q1 Performance OKRs` epic still needs to be added to a parent\nengineering OKR epic. So I'll navigate to the now-linked epic and use\nanother quick action to set the parent epic.\n\n\n```\n\n/parent_epic & \u003Csearch>\n\n```\n\n\nWhen working with multiple levels of epics, remember to keep practicing\nquick actions to create visual epic trees quickly. That’s all for now from\nmy manager's side.\n\n\n### Michael: A developer starts with code\n\n\nI work on the [Developer Evangelism\nteam](/handbook/marketing/developer-relations/developer-evangelism/) at\nGitLab, and although I'm not technically a developer in the typical sense I\nstill work with code on a daily basis. The average day starts with a new\nto-do. Today's to-do points me to the new issue that Roman created. After\nreviewing the issue requirements and defining the changes to be implemented,\nI start work: I'll clean up the work environment, pull the latest changes\nfrom the default branch (main/master), and create a new Git branch in my\nlocal terminal.\n\n\nAfter a few commits, my work day nears its end. I decide to publish the\nlocal Git branch and create a new Merge Request (MR). After creating the MR,\nthe triage workflow kicks off. I mark the [MR as\ndraft](https://docs.gitlab.com/ee/user/project/merge_requests/drafts.html)\nto prevent the workflow from starting before the MR is ready:\n\n\n```\n\n/draft\n\n```\n\n\nThe next day, I continue working on the MR and finish everything that was\nplanned, so I need to remove the draft designation. 
The `draft` quick action\nis a toggle, so I can use it to assign and remove the `Draft` marker.\n\n\n```\n\n/draft\n\n```\n\n\nThe next step is to assign a reviewer for the MR. GitLab 13.7 added [merge\nrequest reviewers](/blog/merge-request-reviewers/), which means\nwe can leave the MR assignee untouched. I'll use the livesearch to assign\nthe right reviewer with a leading `@` character.\n\n\n```\n\n/assign_reviewer @ \u003Csearch>\n\n```\n\n\n![GitLab Quick Actions: Remove draft and assign\nreviewer](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_toggle_draft_assign_reviewer.png)\n\nHow to remove the draft and add a reviewer using GitLab quick actions.\n\n\nAfter the first round of review, I get feedback and items for follow-up.\nSince I am in the middle of a different tasks, I create a new to-do to\nremind myself of an open task to follow up on when I'm ready.\n\n\n```\n\n/todo\n\n```\n\n\nSince my work as a developer evanglist includes many topics and areas, I get\ndistracted with other high priority tasks throughout the day. Later in the\nweek, I'll come back to the MR. The review items have been addressed by team\nmember suggestions and all threads are resolved now. The reviewer approves\nthe MR with the quick action:\n\n\n```\n\n/approve\n\n```\n\n\nThe review process took a little while to complete, and because GitLab is a\nfast-changing project, the Git branch is outdated. I need to rebase against\nthe default branch.\n\n\nBut since I am already working on something else, I do not want to stop what\nI am doing currently to rebase. Then I remember: GitLab 13.8 added the\n`/rebase` quick action. This schedules a new background job that attempts to\nrebase the branch, and stops operations if it fails.\n\n\nI open the MR and create a new comment. 
I start typing the rebase quick\naction, followed by `cmd+enter` to send it:\n\n\n```\n\n/rebase\n\n```\n\n\n![GitLab Quick Actions:\nRebase](https://about.gitlab.com/images/blogimages/improve-your-gitlab-productivity-10-tips/quick_action_rebase.png){:\n.shadow.center}\n\nHow to rebase with GitLab quick actions.\n\n{: .note.text-center}\n\n\nPhew. It worked. The CI/CD pipeline is running, and I believe that the\nrebase did not break anything. I go to click the \"Merge after pipeline\nsucceeds\" button, and remember there's a quick action for that.\n\n\n```\n\n/merge\n\n```\n\n\nThe quick action takes into account what is configured for the project:\nEither merge when the pipeline succeeds or add it to the [Merge\nTrain](/blog/merge-trains-explained/).\n\n\nEverything happens automatically and I can continue working on other tasks.\nThe manager (in this case, Roman) sees the issue being closed automatically\nusing the `Closes` keyword. That's all from my developer's side.\n\n\nTip: [Automatically closing\nissues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)\nafter the MR has been merged is an amazing workflow for everyone, assuming\nthe manager has set the milestone accordingly.\n\n\nAt GitLab, we have documented our [engineering\nworkflows](/handbook/engineering/workflow/) which can be followed more\nefficiently with the quick actions shown in this blog post.\n\n\n### Quick actions + description templates = ❤️\n\n\nWe demonstrated different ways quick actions can be used to complete common\ntasks more efficiently. But they do not always have to be applied manually.\nOne shortcut is to just add them to [description\ntemplates](https://docs.gitlab.com/ee/user/project/description_templates.html)\nso you do not have to worry about remembering them all. This way, you can\nalso automatically assign users, add labels, and much more based on the\ntemplate you apply. 
Using description templates helps with project\ncontributions and allows everyone to focus on the feature proposal or bug\nreport.\n\n\nLet’s try it! Create a new project, navigate into \"Issues > Labels\" and\ngenerate a default set of labels. Next, open the Web IDE and add a new file\nin `.gitlab/issue_templates/bug.md`. Add the following content:\n\n\n```\n\n# Summary\n\n\n# Steps to reproduce\n\n\n1.\n\n1.\n\n1.\n\n\n\u003C!-- Do not edit the section below -->\n\n/label ~\"type::bug\"\n\n/assign @YOURUSER\n\n```\n\n\nFirst, replace YOURUSER with your username (make sure you're logged in).\nCommit the new file to the default branch, and navigate into the issue list.\nNext, create a new issue and select `bug` from the dropdown. Add some\ncontent, and submit the issue. Finally, verify that the label and assignee\nare both set.\n\n\nTip: This is not limited to issue templates, it also works with MRs and\nepics. At GitLab we also often use this function to dynamically assign\npeople based on reports created automatically. There are many opportunities\nto use description templates.\n\n\n### More tips and insights\n\n\nWe have not yet tried the following quick actions - can you help us out? :-)\n\n\n```\n\n/shrug\n\n/tableflip\n\n```\n\n\nThere are more [quick\nactions](https://docs.gitlab.com/ee/user/project/quick_actions.html) and\n[keyboard shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html)\navailable. 
In fact, GitLab user [Gary Bell](https://gitlab.com/garybell)\nshared great insights on quick actions in his \"Tanuki Tuesday\" blog series:\n\n\n- [Quick Actions](https://www.garybell.co.uk/quick-actions-in-gitlab/)\n\n- [Keyboard\nShortcuts](https://www.garybell.co.uk/using-keyboard-shortcuts-in-gitlab/)\n\n\nLet us know in the comments below which quick actions most helped your\nproductivity and if you have other creative ways of using quick actions.\n\n\nPS: We also support shortcuts at GitLab, and the most loved shortcut is `cmd\n+ k` for inserting a Markdown URL.\n\n\nCover image by [Juan Gomez](https://unsplash.com/@nosoylasonia) on\n[Unsplash](https://unsplash.com/photos/kt-wA0GDFq8)\n\n{: .note}\n",[9,696,790],{"slug":2497,"featured":6,"template":699},"improve-your-gitlab-productivity-with-these-10-tips","content:en-us:blog:improve-your-gitlab-productivity-with-these-10-tips.yml","Improve Your Gitlab Productivity With These 10 Tips","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips.yml","en-us/blog/improve-your-gitlab-productivity-with-these-10-tips",{"_path":2503,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2504,"content":2510,"config":2516,"_id":2518,"_type":13,"title":2519,"_source":15,"_file":2520,"_stem":2521,"_extension":18},"/en-us/blog/inside-our-new-development-team-lead-persona",{"title":2505,"description":2506,"ogTitle":2505,"ogDescription":2506,"noIndex":6,"ogImage":2507,"ogUrl":2508,"ogSiteName":685,"ogType":686,"canonicalUrls":2508,"schema":2509},"What are the best and worst parts about being a development team lead?","Dev leads, we feel you. 
Here's a deep dive into our interviews with development team leads, and the new persona they informed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668224/Blog/Hero%20Images/inside-our-new-development-team-lead-persona.jpg","https://about.gitlab.com/blog/inside-our-new-development-team-lead-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What are the best and worst parts about being a development team lead?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2019-01-18\",\n      }",{"title":2505,"description":2506,"authors":2511,"heroImage":2507,"date":2513,"body":2514,"category":832,"tags":2515},[2512],"Katherine Okpara","2019-01-18","\nWelcome back to our series on the [new GitLab personas](https://handbook.gitlab.com/handbook/product/personas/)! I recently [wrote about what we learned from product managers during interviews](/blog/inside-our-new-product-manager-persona/) for our [UX research project to develop personas](https://gitlab.com/gitlab-org/ux-research/issues/77) for all product areas. In this post, I'll share some of the insights from our efforts to better understand development team leads, and introduce the resulting persona, [Delaney](https://handbook.gitlab.com/handbook/product/personas/#delaney-development-team-lead).\n\n## The research\n\nHere are some of the findings from my [six interviews](https://gitlab.com/gitlab-org/ux-research/issues/95) conducted for the persona.\n\nDevelopment team leads are often responsible for meeting with product managers and stakeholders to discuss scheduled feature requests, convert concepts into practical solutions, ensure that capacity is properly estimated, and assign work to developers. 
They are also involved in other duties such as creating design and functional specifications, writing code, documenting and automating processes, and mentoring other developers.\n\n### So, what’s the hardest part about being a development team lead?\n\nDue to the nature of their work, the challenges development team leads face often cross into several domains.\n\n#### Vague requirements and poor communication\n\nIt can be difficult to know the status of certain requirements when other team members don't update the various tools that are being used. Important information can get lost along the way, which often leads to repetitive discussions or fixing incorrect work. Many of the people we spoke with are looking for ways to have this information readily accessible and consistently communicated throughout their teams.\n\n> \"Sometimes the back and forth can be annoying, when the requirements aren’t clear and I have to go back a step to understand what is going on or a component is not what I wanted. At a previous company, the back and forth was especially drawn out since the team did not work closely together. At [my current] company, this problem isn’t as severe since I work closely with the team and can quickly ask for clarification if I need to. Working more efficiently saves a lot of time.\"\n\n####  Difficulty making accurate estimations of timeline and capacity\n\nA team lead must have a good understanding of the skillsets available on their team and use this insight to balance business objectives. In order to get a better sense of the experience levels of different team members, they often hold one-on-one meetings or conduct reviews during and after a development cycle.\n\n> \" ... This goes back to the burndown chart – if it's being used correctly, it can help you see where you’ll end up. In order for that to happen, you need your estimations to be accurate. 
And in order for _that_ to happen you need to figure out the accuracy of the baseline and experience of the developer. For example, someone who is more junior has less of a reference point. I have to assign extra points to stories, if there are unknown variables.\"\n\n#### Delivering on time\n\nWhen demand surpasses current capacity, it can be stressful to resolve existing problems without creating new issues that result from hasty work. It can also be difficult to explain technical limitations to stakeholders who are not involved in the development process.\n\n> \"Someone might see a code review request but feel conflicted since they only have two days left to finish their own tasks. So sometimes testers and customers are waiting on these code reviews to move forward ... The biggest thing would be having all those tickets, all of those changes, closely correlated with the actual changes in Git. 'For this particular feature, here are all the changes in Git.' You don’t have to read the codebase or fire up the whole application. You have the information all in one place and don’t need to hunt down information.\"\n\n#### Changing mindsets in organizations to adopt faster, iterative approaches\n\nSome development teams are slowed down by inefficient toolchains or outdated workflows because their organizations are resistant to change and adopting new practices. Introducing new ideas and methodologies can be an especially complex process in organizations that create products for industries with more restrictions and regulations than others.\n\n> \"Most blockers that arise are put in their own way. I would prefer to iterate while they rather plan everything out for long periods of time. Their own processes get in their way because they don’t think they can move faster. Many of their processes are filled with errors and take days or weeks. 
They’ve always done things a certain way and are not really willing to make a change.\"\n\n### What motivates a development team lead?\n\nOne of the biggest goals for many development team leads is the drive to continually optimize processes and deliver value to the product. They must also build a level of communication that enables them to assign tasks to the appropriate people, explain why certain feature requests are or are not feasible, and continue to implement strategic solutions.\n\n### What’s the best part about being a development team lead?\n\nThe best part of being a Development Team Lead is problem solving on a variety of levels – from tools to methodologies to team relations and more. When teams are well supported by their leaders and organizations, they are better equipped to meet the expectations that will move both the product and business forward!\n\n## The persona\n\n[![Delaney, Development Team Lead persona](https://about.gitlab.com/images/blogimages/delaney-dev-team-lead-persona.png)](https://handbook.gitlab.com/handbook/product/personas/#delaney-development-team-lead)\n\n### Want to share your experiences of GitLab with me?\n\nJoin [GitLab First Look](/community/gitlab-first-look/) and help us build an even better picture of who GitLab’s users really are!\n\n[Photo](https://unsplash.com/photos/atSaEOeE8Nk) by [Steven Lelham](https://unsplash.com/@slelham) on Unsplash\n{: .note}\n",[789,9,791],{"slug":2517,"featured":6,"template":699},"inside-our-new-development-team-lead-persona","content:en-us:blog:inside-our-new-development-team-lead-persona.yml","Inside Our New Development Team Lead 
Persona","en-us/blog/inside-our-new-development-team-lead-persona.yml","en-us/blog/inside-our-new-development-team-lead-persona",{"_path":2523,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2524,"content":2530,"config":2535,"_id":2537,"_type":13,"title":2538,"_source":15,"_file":2539,"_stem":2540,"_extension":18},"/en-us/blog/inside-our-new-product-manager-persona",{"title":2525,"description":2526,"ogTitle":2525,"ogDescription":2526,"noIndex":6,"ogImage":2527,"ogUrl":2528,"ogSiteName":685,"ogType":686,"canonicalUrls":2528,"schema":2529},"What do product managers need to do their best work?","Check out some of the findings that led to our new Product Manager Persona.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678857/Blog/Hero%20Images/investigating-how-product-managers-use-gitlab.jpg","https://about.gitlab.com/blog/inside-our-new-product-manager-persona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What do product managers need to do their best work?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2018-11-12\",\n      }",{"title":2525,"description":2526,"authors":2531,"heroImage":2527,"date":2532,"body":2533,"category":718,"tags":2534},[2512],"2018-11-12","\nRecently I spoke with several product managers and asked them about their experiences, as part of our [effort to create personas](/blog/personas-and-empathy-building/) for every one of GitLab's [product areas](https://handbook.gitlab.com/handbook/product/categories/). I gained a lot of insight through these interviews, including a better understanding of their daily duties, goals and motivations, challenges they face in their roles, and the tools they use throughout the software development lifecycle. 
Many of the findings have been included in our new [Product Manager Persona, Parker](https://handbook.gitlab.com/handbook/product/personas/), to help our own PMs brainstorm improvements and next steps for GitLab features. You can peruse the highlights and the persona itself below, and let us know what you think by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\n## The research\n\nHere are some of the findings from my [eight interviews](https://gitlab.com/gitlab-org/ux-research/issues/88) conducted for the persona.\n\n### So, what’s the hardest part about being a product manager?\n\nThe product manager persona represents people who are responsible for prioritizing feature requests, product roadmapping, and tracking progress of the development of software applications. Since many of these factors depend on how other team members perform, most challenges related to communication and ensuring that their team delivers on time.\n\n#### Staying updated on team progress and important decisions\n\nIt can be difficult to know the status of certain requirements when other team members do not update the various tools that are being used. Important information can get lost along the way, which often leads to repetitive discussions or fixing incorrect work. Users were looking for ways to have this information readily accessible and consistently communicated throughout their teams.\n\n> \"Getting other people to use the tools. I need to make sure that other people are updating the Jira board for example – in my experience, many developers don’t exactly love to do this since it’s a tedious task. Or, if they have a question, adding it in the task so that we can keep a record of everything that’s being worked on. 
Sometimes someone will send me a question on Slack and I’ll copy-paste that into the ticket since sometimes it’s easier for me to do that than to ask someone to get used to doing that.\"\n\n#### Prioritizing features to build when dealing with limited resources\n\nProduct managers are often responsible for defining and scoping features, incorporating company objectives into the product roadmap, and giving developers and designers the requirements they need to deliver strong features. As a result, product teams often have trouble balancing feature requests with development capacity.\n\n> \"...Being able to find balance between being strategic and being practical. Being able to look into the future and be ambitious while at the same time having to put out fires and manage the day-to-day. Another challenge is staying in touch with the end user. We do not have as much time to be on top of the market and to interview customers. We're not as able to get market feedback and do market research as well...\"\n\n#### Simplifying information\u2028 for the different stakeholders involved in the product\n\nThe need to give clients and stakeholders timelines and estimates that are accurate but also realistic can be very stressful for a product manager. This is largely due to the fact that a cycle is often unpredictable. 
It can also be challenging to explain why certain features have been delayed or deprioritized, when customers and upper-level management are not working closely with the team.\n\n> \"Some of the challenges of working with the technical team leads is that they will forget to update things or they’ll give me a summary that is super technical so I have to ask more questions to make sure that I understand and have the ability to explain to other product managers where the developers are stuck, because they need more definition on what that feature should look like.\"\n\n### What motivates a product manager?\n\nProduct managers generally are motivated by the desire to deliver high-quality features in a timely manner. When company objectives shift, they want to have a standard process for communication, so that they can be in sync with all team members. They need to see an overview of all the relevant information related to a feature or product, so that they can monitor progress throughout a cycle. Additionally, they want to be able to help their teams accomplish more of their goals over time.\n\n### What’s the best part about being a product manager?\n\nAll in all, the interviewees all expressed the joy they receive from simply doing their jobs, whether that’s improving life for users or speeding up processes within the company. 
The best part of being a product manager is the opportunity to bring a concept to life and solve real problems for their users.\n\n## The persona\n\n![Parker, Product Manager persona](https://about.gitlab.com/images/blogimages/product-manager-persona.png){: .shadow.center}\n\nKeep an eye out for the rest of our series on the [new personas](https://handbook.gitlab.com/handbook/product/personas/)!\n\n[Photo](https://unsplash.com/photos/YiRQIglwYig) by [Hello I'm Nik](https://unsplash.com/@helloimnik) on Unsplash\n{: .note}\n",[789,790,9],{"slug":2536,"featured":6,"template":699},"inside-our-new-product-manager-persona","content:en-us:blog:inside-our-new-product-manager-persona.yml","Inside Our New Product Manager Persona","en-us/blog/inside-our-new-product-manager-persona.yml","en-us/blog/inside-our-new-product-manager-persona",{"_path":2542,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2543,"content":2549,"config":2554,"_id":2556,"_type":13,"title":2557,"_source":15,"_file":2558,"_stem":2559,"_extension":18},"/en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"title":2544,"description":2545,"ogTitle":2544,"ogDescription":2545,"noIndex":6,"ogImage":2546,"ogUrl":2547,"ogSiteName":685,"ogType":686,"canonicalUrls":2547,"schema":2548},"Interactive: Take a guided tour of the DevSecOps workflow","Explore GitLab's recommended best practices for DevSecOps with a detailed visual depiction of the main steps in the development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668969/Blog/Hero%20Images/blog-image-template-1800x945__1800_x_945_px_.png","https://about.gitlab.com/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Interactive: Take a guided tour of the DevSecOps workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        
\"datePublished\": \"2024-04-16\",\n      }",{"title":2544,"description":2545,"authors":2550,"heroImage":2546,"date":2551,"body":2552,"category":693,"tags":2553},[1340],"2024-04-16","When engaging in discussions with industry professionals and stakeholders, they quickly grasp the core principles of DevSecOps, which emphasize speed, security, and quality. However, there's often a curiosity about the specific strategies required to achieve optimal speed without compromising security and quality. We created this interactive infographic to showcase GitLab's best practices for [DevSecOps](https://about.gitlab.com/topics/devsecops/) through a detailed visual depiction of the main steps in the development lifecycle.\n\nWalk through every step of the DevSecOps process, including creation of [issues](https://docs.gitlab.com/ee/user/project/issues/), development and pushing of code, [security testing](https://about.gitlab.com/stages-devops-lifecycle/secure/), and deployment to production. Each step features a deep dive with additional resources such as demos, blog posts, and documentation.\n\n## Get started with the interactive tour\n\nClick on the image below to access the guided tour, and use the navigation buttons or keyword arrows to easily make your way through the flow.\n\n[![GitLab workflow description](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676928/Blog/Content%20Images/infographic.png)](https://tech-marketing.gitlab.io/static-demos/gitlab-infographic.html)\n\n\u003Cp>\u003C/p>\n\n> > Learn how [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, further enhances the DevSecOps workflow.\n",[1159,9,108],{"slug":2555,"featured":90,"template":699},"interactive-take-a-guided-tour-of-the-devsecops-workflow","content:en-us:blog:interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","Interactive Take A Guided Tour Of The Devsecops 
Workflow","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow.yml","en-us/blog/interactive-take-a-guided-tour-of-the-devsecops-workflow",{"_path":2561,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2562,"content":2568,"config":2574,"_id":2576,"_type":13,"title":2577,"_source":15,"_file":2578,"_stem":2579,"_extension":18},"/en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"title":2563,"description":2564,"ogTitle":2563,"ogDescription":2564,"noIndex":6,"ogImage":2565,"ogUrl":2566,"ogSiteName":685,"ogType":686,"canonicalUrls":2566,"schema":2567},"Adopt ModelOps within DevOps to solve data science challenges","The ModelOps stage of DevOps applies AI and ML to address complex data science challenges.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668437/Blog/Hero%20Images/faster-cycle-times.jpg","https://about.gitlab.com/blog/introducing-modelops-to-solve-data-science-challenges","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Adopt ModelOps within DevOps to solve data science challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-21\",\n      }",{"title":2563,"description":2564,"authors":2569,"heroImage":2565,"date":2571,"body":2572,"category":693,"tags":2573},[2570],"Taylor McCaslin","2022-01-21","\nIn a [recent blog post](/blog/the-road-to-smarter-code-reviewer-recommendations/) discussing the progress of integrating novel machine learning (ML) algorithms into GitLab we introduced our new [ModelOps stage](/direction/modelops/). This stage is focused on enabling and empowering data science workloads on GitLab. 
GitLab ModelOps aims to bring data science into GitLab within existing features to make them smarter and more intelligent and empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nAn interesting question we hear a lot is how will this be useful for DevOps professionals? So we wanted to dive into who exactly we’re building ModelOps features for and why. To begin, here is an overview of how we’ve chosen to structure our new ModelOps stage. \n\n## ModelOps: Enabling and empowering data science workloads\n\n![Chart of ModelOps stages](https://about.gitlab.com/images/blogimages/Screen_Shot_2022-01-19_at_1.11.36_PM.png){: .shadow}\n\nModelOps is about taking all the best practices we’ve learned building a DevOps platform and applying them to the unique challenges of AI and ML workloads. Our ModelOps stage is divided into three primary groups: DataOps, MLOps, and AI Assisted. Each group has specific jobs to be done and challenges. Part of the reason we chose this organization model is due to the different user personas we’re trying to solve problems for in each of these areas. Now let’s dive into the people in each group, as well as the challenges each group aims to solve. \n\n## DataOps: Get the data, clean it, and process it\n\nDataOps is focused on everything required to process data workloads, including fetching data, cleaning it, and processing it. You may have heard this called ELT, or Extract, Load, Transformation, of data. But DataOps is more than just the ELT, there are lots of other problems that come with data sources. For example, data located in many disparate systems in many formats and lacking common data definitions. Most data sources require a lot of processing to access, move, clean, and interpret data. We have specialists whose entire job is [all of the work to get data into usable states](https://online.hbs.edu/blog/post/data-life-cycle) so organizations can do something of business value with it. 
\n\nDepending on the organization, these data professionals may have different titles such as data engineer, data architect, or data analyst.  These data wranglers have many assorted jobs: aggregating disparate data sources, cleaning and shaping data into usable formats, making data available to the business, and even analyzing data and answering business questions.\n\nThe data experts leverage many tools such as ELT platforms, big data warehouses, data pipelines, and database technologies like SQL and elastic search. Data management tooling can be an extremely complex series of connections piping data in and out of various platforms. These challenges are the heart of the problems we’re aiming to solve.\n\n## MLOps: Do something useful with the data\n\nNext is MLOps, which is what most people associate with data science. MLOps aims to enable customer data science use cases, including accessing and interacting with data, AI/ML toolchain integrations, and compute environment integrations. Basically, everything that is required to build, test, train, and deploy AI/ML models into production systems. MLOps leverages math to solve problems using computing power to find patterns in the data that we just discussed with DataOps. \n\nData science teams feature professionals with titles such as data scientists, ML engineers, or ML specialists. These experts usually have a mix of higher-level math and statistics skills, software engineering, and basic DevOps skills. They can cobble together environments to build, train, test, and explore data science models to solve specific business problems.\n\nThe work data scientists do is more than just building ML models. They have to understand the business data and problems they are trying to leverage data science to solve. It’s usually very experimental and requires a lot of iteration to find a solution that solves a particular business problem in a useful way. 
It’s common for data scientists to spend a lot of time exploring and understanding datasets and the business problems organizations are hoping data science can solve. They then build and train AI/ML models, evaluate model output, and then iterate their models.\n\nAmong the common tools these data scientists use are Python notebooks, which allow them to leverage scripting to explore and manipulate data and try different modeling techniques. They also may use many open source ML and data science frameworks, as well as special data science platforms that help manage, version, interpret, and monitor models. Most of this work almost never happens in production environments. It happens on local machines or in cloud computing platforms where data scientists can leverage highly specialized compute, optimized for running data science models. That leaves an interesting challenge of how do you deploy their work to production systems.  Our last use case, DevOps, provides the solution. \n\n## AI Assisted: Leverage data to solve business problems \n\nWhile our AI Assisted group isn't specifically focused on any one user persona, we are planning to enrich existing GitLab features with ML. Our goal is to take features that require manual work to leverage and apply ML to automate these tasks. Tasks like assigning and labeling issues, choosing code reviewers, and even triaging and fixing security vulnerabilities. You can read more about our AI Assisted plans on our [direction page](/direction/ai-powered/) or check in on the status of our first Applied ML feature, [suggested reviewers](/blog/the-road-to-smarter-code-reviewer-recommendations/). Now that we've touched on improving GitLab for everyone, let's go back to GitLab's main persona, DevOps engineers.\n\n## DevOps: Build, test, and deploy software \n\nDevOps is probably the most understood use case that we’re trying to solve with our ModelOps stage. However, we’re focused on the intersection of DevOps and data science workloads. 
Specifically what happens when you need to deploy a data science model to a production system. GitLab’s DevOps platform is already an established and mature platform for building, testing, and deploying traditional software applications. But the software stacks of modern organizations are evolving and becoming more sophisticated, including leveraging ML. We’ve described some of the challenges and new personas that are involved with the development of data science workloads, but what happens when it’s time to go to production?\n\nToday, data science teams and DevOps engineers work in separate silos with very different skills sets and technology challenges. So when a data science team has a new ML model they want to push into a production software environment and integrate into a running application, in walks a whole new set of challenges. \n\nJust about every software company now has DevOps teams focused on repeatability, stability, and velocity of software development lifecycles. Everything relating to the design, build, testing, deployment, security, and monitoring of software from idea to deploy into a production system. These teams are usually comprised of software engineers and DevOps engineers. The people who write, build, and test code with repeatable CI/CD, allowing software teams to seamlessly develop software applications. \n\n## Helping them all work together\n\nOur goal with ModelOps is to help all of these people work together to build and deploy data-rich modern applications leveraging novel ML workloads. We want to bring data science into GitLab within existing features to make them smarter and more intelligent and to empower GitLab customers to build and integrate data science workloads in their own applications built and deployed with GitLab. Each of these groups has unique challenges and use cases that are interconnected. That’s part of what makes data science difficult. 
It has a lot of moving parts and crosses every aspect of modern software development lifecycles with very unique challenges. \n\nIf all of this is interesting to you, you may also enjoy watching our recent Contribute session, where we discuss more about what we plan to accomplish with our ModelOps stage, which you can watch on YouTube.\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n_This blog post contains information related to upcoming products, features and functionality._\n\n_It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes._\n\n_As with all projects, the items mentioned in this blog post and linked pages are subject to change and delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[722,743,9],{"slug":2575,"featured":6,"template":699},"introducing-modelops-to-solve-data-science-challenges","content:en-us:blog:introducing-modelops-to-solve-data-science-challenges.yml","Introducing Modelops To Solve Data Science Challenges","en-us/blog/introducing-modelops-to-solve-data-science-challenges.yml","en-us/blog/introducing-modelops-to-solve-data-science-challenges",{"_path":2581,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2582,"content":2588,"config":2592,"_id":2594,"_type":13,"title":2595,"_source":15,"_file":2596,"_stem":2597,"_extension":18},"/en-us/blog/introducing-resource-groups",{"title":2583,"description":2584,"ogTitle":2583,"ogDescription":2584,"noIndex":6,"ogImage":2585,"ogUrl":2586,"ogSiteName":685,"ogType":686,"canonicalUrls":2586,"schema":2587},"Introducing: Resource groups","How we’re improving deployments by limiting pipeline 
concurrency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679102/Blog/Hero%20Images/resource-groups.jpg","https://about.gitlab.com/blog/introducing-resource-groups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing: Resource groups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-21\",\n      }",{"title":2583,"description":2584,"authors":2589,"heroImage":2585,"date":1563,"body":2590,"category":718,"tags":2591},[1113],"\nGitLab CI/CD pipelines build, test, deploy your code as part of a single workflow integrated across all [stages of the DevOps lifecycle](/topics/devops/). Ultimately, we want to enable teams to deploy better software faster to their customers, and we do that by continually [iterating](https://handbook.gitlab.com/handbook/values/#iteration) on new and existing features to improve the GitLab experience.\n\nContinuous delivery is all about making sure that [CI-validated code](/solutions/continuous-integration/) goes through a structured deployment pipeline. While GitLab CI continues to be [a top-rated solution in continuous integration](/analysts/forrester-cloudci19/), we want our continuous delivery capabilities to be just as loved and feedback from the GitLab community plays a big role in how we improve the user experience.\n\nAt GitLab, everything we do is [public by default](https://handbook.gitlab.com/handbook/values/#public-by-default). This allows us to collaborate and share ideas, documentation, examples, and processes with the whole community. 
The original idea of limiting pipeline concurrency using resource groups was introduced by [@inem](https://gitlab.com/inem) in [a public issue](https://gitlab.com/gitlab-org/gitlab/issues/15536) and the response was certainly enthusiastic.\n\n![Resource groups response](https://about.gitlab.com/images/blogimages/resource-groups-1.png){: .shadow.small.center}\n\nFor some users, they found that running multiple pipelines and/or jobs at the same time in an environment would lead to errors. Some pipelines and/or jobs use unique resources, and concurrent deployments meant that multiple users were affecting the environment with some unintended consequences.\n\n### Example:\n\nLet's say your team is developing a mobile app and you deploy it for testing purposes to a physical smartphone on a Friday afternoon. Maybe you're a startup and only have one or two phones for this purpose. You may need to clear the cache and delete the app before downloading it again so you can start the test clean. But what if in the middle of your test, someone else decides to clear the data on that device? Situations like this would inevitably cause errors, leaving teams with little choice but to coordinate these deployments amongst themselves.\n\nWe’re always working hard to enable [speedy, reliable pipelines](/direction/ops/#speedy-reliable-pipelines). Coming to GitLab 12.7, available tomorrow, we’re introducing the `resource_group` attribute to projects so that only one job can deploy to a specific resource group at any given time. This will improve deployment flows, especially when deploying to physical environments.\n\nIf we go back to the mobile phone example, the phone would be its own `resource_group` and will only have one deployment at a time. 
If another deployment were to try and run on this device, the job will be queued until the first job is finished with the message “waiting for resource.”\n\n![waiting on resource](https://about.gitlab.com/images/blogimages/resource-groups-2.png){: .shadow.medium.center}\n\nTeams can define multiple `resource_group`(s) for their environment in `.gitlab-ci.yml`. Even if running separate pipelines, as long as a `resource_group` is assigned then the jobs will not run concurrently. Tools like [Terraform](https://www.terraform.io/docs/internals/graph.html) similarly help users manage concurrencies by limiting resources.\n\nAs we continue to improve and iterate on our [product vision for continuous delivery](/direction/ops/), we’ll be looking to make future improvements to resource groups and deployment environments. Some of our plans include implicit environment locking, [only allowing forward incremental deployments](https://gitlab.com/gitlab-org/gitlab/issues/25276), and the flexibility to define concurrency values (the default of 1 can’t be configured in this release).\n\nPlease join us in our [public epic](https://gitlab.com/groups/gitlab-org/-/epics/1294) where we discuss continuous delivery and feel free to give feedback or suggestions on ways we can improve deployments. 
Everyone can contribute.\n\nCover image by [mostafa meraji](https://unsplash.com/@mostafa_meraji?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/turnstile?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[108,9],{"slug":2593,"featured":6,"template":699},"introducing-resource-groups","content:en-us:blog:introducing-resource-groups.yml","Introducing Resource Groups","en-us/blog/introducing-resource-groups.yml","en-us/blog/introducing-resource-groups",{"_path":2599,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2600,"content":2605,"config":2609,"_id":2611,"_type":13,"title":2612,"_source":15,"_file":2613,"_stem":2614,"_extension":18},"/en-us/blog/introducing-the-gitlab-kubernetes-agent",{"title":2601,"description":2602,"ogTitle":2601,"ogDescription":2602,"noIndex":6,"ogImage":803,"ogUrl":2603,"ogSiteName":685,"ogType":686,"canonicalUrls":2603,"schema":2604},"Understand the new GitLab Agent for Kubernetes","Just released in 13.4, our brand new Kubernetes Agent provides a secure and K8s–friendly approach to integrating GitLab with your clusters.","https://about.gitlab.com/blog/introducing-the-gitlab-kubernetes-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand the new GitLab Agent for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2020-09-22\",\n      }",{"title":2601,"description":2602,"authors":2606,"heroImage":803,"date":1779,"body":2607,"category":832,"tags":2608},[1798],"\n\nWe are happy to share the first iteration of the GitLab Agent for Kubernetes with our users and community. The Agent is the foundation for the next generation of the integration between GitLab and Kubernetes. 
\n\n## A bit of history of the GitLab Kubernetes Integrations\n\nGitLab's current Kubernetes integrations were introduced more than three years ago. Their primary goal was to allow a simple setup of clusters and provide a smooth deployment experience to our users. These integrations served us well in the past years but at the same time its weaknesses were limiting for some important and crucial use cases. The biggest weaknesses we see with the current integration are:\n\n- the requirement to open up the cluster to the internet, especially to GitLab\n- the need for cluster admin rights to get the benefit of GitLab Managed Clusters\n- exclusive support for push-based deployments that might not suit some highly regulated industries\n\nA few months ago, the Configure Team at GitLab started going in a new direction to come up with an integration that could address these weaknesses and provide a cloud native tie-in between GitLab and Kubernetes. This new direction is built on the GitLab Agent for Kubernetes, which we released in [GitLab 13.4](/releases/2020/09/22/gitlab-13-4-released/).\n\n## Design goals\n\nWhen we sat down to solve for the above weaknesses, we came up with a few principles that we are seeking to follow.\n\nWe want to be good cloud native citizens, and work together with the community, instead of reinventing the wheel.\n\nWe primarily want to serve expert Kubernetes platform engineers. While the current GitLab Managed Clusters and cluster creation from within GitLab might serve many use cases, it's primarily aimed at simple cluster setup and is not flexible enough to be the basis for production clusters. We want to change this approach, and are focusing on the needs of expert Kubernetes engineers first. 
We think that coming up with sane defaults will provide the necessary simplicity for new Kubernetes users as well.\n\nWe want to offer a secure solution that allows cluster operators to restrict GitLab's rights in the cluster and does not require opening up the cluster to the Internet.\n\n## The Agent\n\nFollowing the above goals, we've started to develop the GitLab Agent for Kubernetes. The Agent provides a permanent communication channel between GitLab and the cluster. To follow industry best practices for [GitOps](/topics/gitops/) it is configured by code, instead of a UI.\n\nThe current version of the Agent allows for pull-based deployments. Its deployment machinery is built on the [`gitops-engine`](https://github.com/argoproj/gitops-engine), a project initiated by ArgoCD and Flux where GitLab engineers are actively contributing as well.\n\n### Setting up the GitLab Agent\n\nThe Agent needs to be set up first. This requires a few actions from the user:\n\n- create an Agent token for authentication with GitLab, and store it in your cluster as a secret\n- commit the necessary Agent configurations in one of your repositories\n- install the Agent to your cluster\n\n### Deployments with an Agent\n\nAs mentioned above, the Agent needs a configuration directory inside one of your repositories. This configuration describes the projects that the Agent syncs into your clusters. We call the synced projects the __manifest project__. The manifest project should contain Kubernetes manifest files. The __manifest project__ might be either inside or separated from your application code.\n\nWe've set up a simple example that shows a __manifest project__ and an __application project__. In this example [GitLab CI/CD](/topics/ci-cd/) in the __application project__ is used to create a container image and update the __manifest project__. 
Then the Agent picks up the changes from the __manifest project__, and deploys the Kubernetes manifests stored there.\n\n### Limitations\n\nAs this is the initial release of the Agent, it has many known limitations. We don't support all the amazing features the previous GitLab Kubernetes integration does such as [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), deploy boards, GitLab Managed Apps, etc. To start in GitLab 13.4 we limited our focus to supporting pull-based deployment for Helm-based GitLab installations. \n\nFollowing the current release, we will be focusing on:\n\n- [shipping the GitLab Agent for Kubernetes as part of the Official Linux Package](https://gitlab.com/groups/gitlab-org/-/epics/3834)\n- [supporting the deployment of private repositories](https://gitlab.com/gitlab-org/gitlab/-/issues/220912)\n\n## Further plans for GitLab Kubernetes Integrations\n\nThe Agent opens up many new opportunities for GitLab's Kubernetes integrations. Having an active component allows us to provide all the GitLab functionalities in locked down clusters as well. We're currently looking into the following areas to support with the agent:\n\n- integrate cluster-side dynamic container scanning with GitLab\n- use GitLab as an authentication and authorization provider for Kubernetes clusters\n- offer linters and checks for Kubernetes best practices on deployed resources\n- proxy cluster services easily through GitLab\n\nYou can see all our plans in the [Agent epic](https://gitlab.com/groups/gitlab-org/-/epics/3329) where we invite you to give us feedback about this direction. 
\n\nYou can view a demo of how to install and use the GitLab Agent below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/505413162\" width=\"640\" height=\"480\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n",[1742,835,1925,9],{"slug":2610,"featured":6,"template":699},"introducing-the-gitlab-kubernetes-agent","content:en-us:blog:introducing-the-gitlab-kubernetes-agent.yml","Introducing The Gitlab Kubernetes Agent","en-us/blog/introducing-the-gitlab-kubernetes-agent.yml","en-us/blog/introducing-the-gitlab-kubernetes-agent",{"_path":2616,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2617,"content":2623,"config":2627,"_id":2629,"_type":13,"title":2630,"_source":15,"_file":2631,"_stem":2632,"_extension":18},"/en-us/blog/issue-boards-anniversary",{"title":2618,"description":2619,"ogTitle":2618,"ogDescription":2619,"noIndex":6,"ogImage":2620,"ogUrl":2621,"ogSiteName":685,"ogType":686,"canonicalUrls":2621,"schema":2622},"The evolution of the GitLab Issue Board","Celebrating one year of flexible, integrated project and release management workflows inside GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680378/Blog/Hero%20Images/issue-boards-anniversary.jpg","https://about.gitlab.com/blog/issue-boards-anniversary","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The evolution of the GitLab Issue Board\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2017-08-23\",\n      }",{"title":2618,"description":2619,"authors":2624,"heroImage":2620,"date":2042,"body":2625,"category":300,"tags":2626},[715],"\n\n[Collaboration is driven by conversation](https://www.forbes.com/sites/laurencebradford/2016/12/30/the-future-of-web-development-with-gitlab-ceo-sid-sijbrandij/#7b8895b04148). 
It should be a natural and integrated\npractice throughout the development lifecycle, not a manual, contrived process\nthat requires administration and maintenance. By integrating every step of the\nsoftware development lifecycle and providing all the necessary collaboration\ntools, GitLab by default provides all the capabilities modern development teams need to support cross-functional collaboration at scale. GitLab was created so teams could focus more on their work, not on configuring their tools.\n\n\u003C!-- more -->\n\nLast year, we announced the first iteration of the [GitLab Issue Board](/stages-devops-lifecycle/issueboard/), a major\nmilestone in our mission to create an open source and integrated\nproduct for modern software development. Built on top of our integrated issue\ntracking system, it became possible to visualize your work and customize your\nworkflow inside of GitLab.\n\nToday, GitLab comes with everything you need to plan and track projects and\nreleases, including issues (tracker and board), milestones, burndown charts,\nchat integration, and more. Communication is centralized, plans\nand progress are visible, and work is linked, making collaboration frictionless.\n\nTo celebrate our progress over the past year, no small thanks to the community and feedback from our customers, we wanted to take a look at where\nthe Issue Board has gone since we launched its first iteration in GitLab 8.11.\n\n## The evolution of the GitLab Issue Board\n\nAt GitLab, we practice [Conversational Development](http://conversationaldevelopment.com/). Our software development is centered around conversations on what can be improved, how to implement it,\nwhether or not it worked, and if it achieved the expected value. Instead of\nwaiting months to release a “perfect” feature, we work on smaller, functional\nchanges that can get into the hands of our users much more quickly. 
There’s a few reasons why we develop software this way: in addition to being\nable to deliver value faster, we can also react to the market and iterate faster with more frequent feedback loops, and if something goes wrong, it’s easier to spot and fix the problem.\n\nThis is how we built the GitLab Issue Board. We started by shipping the basic\nfunctionality needed to allow users to visualize and track their issues. Over the last 12 months, we’ve released small changes every month. Today, the GitLab Issue Board has everything you need to plan and track your projects and releases.\n\nHere’s how we built it over multiple monthly releases:\n\n### August—October 2016 | GitLab 8.11-13\nThe [GitLab Issue Board](https://docs.gitlab.com/ee/user/project/issue_board.html)\nis released in 8.11. Built on top of our integrated issue\ntracking system, it uses labels from issues to create lists on a board. You can drag and drop lists to organize your workflow, move issues between lists, and labels are updated automatically as you move them across the board.\n\nUsers now have the ability to [create workflows inside of GitLab](https://docs.gitlab.com/ee/user/project/issue_board.html#creating-workflows).\n\n\u003Cimg src=\"/images/8_11/issue_boards.gif\" alt=\"Issue Boards in GitLab 8.11\" class=\"shadow\">\n\n[Multiple Issue Boards](https://docs.gitlab.com/ee/user/project/issue_board.html#multiple-issue-boards) are released in 8.13. Users can now create multiple\nworkflows, allowing different teams to create their own customized boards with\nthe same issues. 
Once a board is finished, you can leave it as is to review later, or recycle it.\n\n\u003Cimg src=\"/images/8_13/m_ib.gif\" alt=\"Multiple Issue Boards in GitLab 8.13\" class=\"shadow\">\n\n### January—February 2017 | GitLab 8.16-17\nNew search and filter interface is added to the [Issue Tracker](https://docs.gitlab.com/ee/user/project/issues/index.html) in 8.16, making it easier for users to search and filter their issues by different attributes such as author, assignee, milestone, and label.\n\nThe new search and filter interface is added to the Issue Board in GitLab 8.17,\nimproving usability. A modal window is added to display all issues that don’t belong to a list for easier search and filtering.\nIssues can be added to a list from the modal, and issues can be removed from a\nlist on the board.\n\n\u003Cimg src=\"/images/8_17/board_modal.png\" alt=\"Add issues modal in board in GitLab 8.17\" class=\"shadow\">\n\n### March 2017 | GitLab 9.0\n[Milestones](https://docs.gitlab.com/ee/user/project/milestones/index.html) are added to the Issue Board in GitLab 9.0 enabling users to organize issues into cycles or sprints with a start date and deadline. Issues can be filtered on the board by milestone, or new boards can be created for individual milestones.\n\n\u003Cimg src=\"/images/9_0/boards_milestone.gif\" alt=\"Boards Milestone\" class=\"shadow\">\n\nThe ability to [reorder issues in a Board list](https://docs.gitlab.com/ee/user/project/issue_board.html#re-ordering-an-issue-in-a-list) is also introduced in 9.0. 
Now, users prioritize issues within a list simply by dragging and dropping the issue card.\n\n\u003Cimg src=\"/images/9_0/boards_reorder.gif\" alt=\"Boards Reorder\" class=\"shadow\">\n\n## Get started with GitLab Issue Boards\n\nStart building your project and release management workflows using the Issue Board.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/CiolDtBIOA0\" frameborder=\"0\" allowfullscreen>\u003C/iframe>\n\nIn this video, Discussion Product Manager Victor Wu demonstrates how to use GitLab Issue Boards for Agile and Scrum-style planning and tracking.\n\n### Documentation Quick Links\n\n- [Issue Board Overview](https://docs.gitlab.com/ee/user/project/issue_board.html#overview)\n- [Issue Boards for Scrum](https://docs.gitlab.com/ee/user/project/issue_board.html#scrum-team)\n- [Issue Board terminology](https://docs.gitlab.com/ee/user/project/issue_board.html#issue-board-terminology)\n- [Creating workflows](https://docs.gitlab.com/ee/user/project/issue_board.html#creating-workflows)\n\n## Help us celebrate our #Issueversary\n\nEveryone has an Issue Board story. Maybe you spent months on a long conversation to get your legal team to embrace the Kanban and promptly blew their minds. Maybe your team is full of diehards driven by strongly held opinions over exactly how many stages yours should have. Maybe you're a remote worker and your issue board is one of the main ways you keep up with teammates spread across the globe.\n\nWhatever your story is, we want to hear from you! Help us celebrate a year of the GitLab Issue Board by [sending us your Issue Board story](https://docs.google.com/forms/d/e/1FAIpQLSf_0DTiQX1X048X6ioAVLRLSBwJzVSG1LH7LupoFdsascPAAw/viewform)\nfor your chance to win free GitLab swag. 
We'll tweet out our favorites and announce the winners on September 5.\n",[834,9],{"slug":2628,"featured":6,"template":699},"issue-boards-anniversary","content:en-us:blog:issue-boards-anniversary.yml","Issue Boards Anniversary","en-us/blog/issue-boards-anniversary.yml","en-us/blog/issue-boards-anniversary",{"_path":2634,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2635,"content":2641,"config":2646,"_id":2648,"_type":13,"title":2649,"_source":15,"_file":2650,"_stem":2651,"_extension":18},"/en-us/blog/issue-labels-can-now-be-scoped",{"title":2636,"description":2637,"ogTitle":2636,"ogDescription":2637,"noIndex":6,"ogImage":2638,"ogUrl":2639,"ogSiteName":685,"ogType":686,"canonicalUrls":2639,"schema":2640},"Issue labels can now be scoped!","A small change with a huge impact: Scoped Labels can help teams customize their workflow and speed up delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679729/Blog/Hero%20Images/scopedlabels.jpg","https://about.gitlab.com/blog/issue-labels-can-now-be-scoped","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Issue labels can now be scoped!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-06-20\",\n      }",{"title":2636,"description":2637,"authors":2642,"heroImage":2638,"date":2643,"body":2644,"category":718,"tags":2645},[852],"2019-06-20","\n\nGreat news, everyone!\n[Hailed as one of the best inventions since sliced bread](https://gitlab.com/gitlab-com/marketing/corporate-marketing/issues/682),\n[Scoped Labels](/releases/2019/04/22/gitlab-11-10-released/#scoped-labels) can make your\ncustom workflows even cooler. 
We’re excited to share how using this small feature can\naccelerate delivery.\n\nPlease note that this is a paid feature available to Premium and Ultimate for [self-managed](/pricing/#self-managed) GitLab\nor Silver and Gold tiers for [GitLab.com](/pricing/).\n{: .alert .alert-info.text-center}\n\n## What are GitLab Scoped Labels?\n\nIt all started with\n[an issue detailing a feature with a simple idea](https://gitlab.com/gitlab-org/gitlab-ee/issues/9175):\nHelp teams that use issue boards for workflow. With Scoped Labels, teams can apply\nmutually exclusive labels (that share the same scope) to an issue, merge request,\nor epic, solving custom fields and custom workflow states use cases. Scoped Labels\nmake it possible for teams to define a basic custom field that avoids confusion and\ncleans up issue lists (i.e. fewer duplicate labels).\n\n![Scoped labels](https://about.gitlab.com/images/blogimages/scoped-labels.png){: .shadow.center.medium}\n\nBy using Scoped Labels, teams can create custom labels and apply them to a\ngiven issue, automatically removing any other existing, related labels. For example,\nif you have the labels `workflow::development`, `workflow::review`, and `workflow::deployed`,\nrepresenting the workflow states of your team, you can advance the issue\n(e.g., `workflow::development` to `workflow::review`) by applying the next label\nwithout having to remove the original one.\n\nYou may already be familiar with this behavior, since it’s similar to moving\nissues across label lists in an issue board. Now, team members who don’t directly work\nin an issue board or who want to advance workflow states consistently in\nissues themselves can do so using Scoped Labels.\n\n## How Scoped Labels accelerate delivery\n\nYou might be thinking that Scoped Labels is too small of a feature to make a splash,\nbut hear me out, it can help reduce cycle time. Here's how:\n\n1. 
If you want a custom field on your issues, like a drop-down with a few items\nyou can select (e.g., colors or stages), Scoped Labels prevent conflicts where\nnormally only one color or one stage is possible. By removing conflicts, multiple\nteams can scope an issue, merge request, or epic.\n1. You can define the workflow steps for an issue (e.g., proposal, design,\ndevelopment, QA, acceptance, deploy), creating the basis for how you can eventually\nmeasure the flow of work through the system (based on how long issues have specific labels).\n\nThese two use cases illustrate how Scoped Labels can help teams work concurrently\non features and measure their efforts.\n\n## Scoped Labels: A feature film\n\nWant to see Scoped Labels in action? Get your popcorn ready and enjoy the show! 🍿\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4BCBby6du3c\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCheck out the [documentation on Scoped Labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels) for more.\n\nCover image by [Jo Szczepanska](https://unsplash.com/@joszczepanska?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/9OKGEVJiTKk)\n{: .note}\n",[834,9,744],{"slug":2647,"featured":6,"template":699},"issue-labels-can-now-be-scoped","content:en-us:blog:issue-labels-can-now-be-scoped.yml","Issue Labels Can Now Be 
Scoped","en-us/blog/issue-labels-can-now-be-scoped.yml","en-us/blog/issue-labels-can-now-be-scoped",{"_path":2653,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2654,"content":2660,"config":2665,"_id":2667,"_type":13,"title":2668,"_source":15,"_file":2669,"_stem":2670,"_extension":18},"/en-us/blog/iteration-on-error-tracking",{"title":2655,"description":2656,"ogTitle":2655,"ogDescription":2656,"noIndex":6,"ogImage":2657,"ogUrl":2658,"ogSiteName":685,"ogType":686,"canonicalUrls":2658,"schema":2659},"Why we scoped down to build up error tracking ","We dig into how shipping small iterations is accelerating delivery on our error tracking product.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665440/Blog/Hero%20Images/automate-ce-ee-merges.jpg","https://about.gitlab.com/blog/iteration-on-error-tracking","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we scoped down to build up error tracking \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2020-01-23\",\n      }",{"title":2655,"description":2656,"authors":2661,"heroImage":2657,"date":2662,"body":2663,"category":832,"tags":2664},[1901],"2020-01-23","When our vision for [error tracking](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/) is fully realized, the developers who use GitLab will be able to find and fix errors before their customers ever report them, all while staying in our tool. 
But waiting until our error tracking feature is pristine would just slow us down.\n\nInstead, the engineers and product managers on the [Monitor:Health](https://handbook.gitlab.com/handbook/engineering/development/ops/monitor/respond/) team work **iteratively** by shipping smaller changes as we move closer to achieving our vision for the error tracking feature.\n\n## What does it mean to work iteratively?\n\n\"[Iterating] means scoping down a task to deliver it sooner. So, it means making something smaller so you can get it done quicker,\" says [Sid Sijbrandij](/company/team/#sytses), CEO and co-founder of GitLab.\n\nWe made [iteration](https://handbook.gitlab.com/handbook/values/#iteration) one of our core company values because of the fundamental belief that even a small change is better than no change at all. And while iteration in engineering is already recognized as being effective, our organization aims to make iteration a component to every team’s workflow.\n\nIn the video below, Sid and [Christopher \"Leif\" Lefelhocz](https://about.gitlab.com/company/team/#christopher-l), senior director of development, share how the product and engineering teams worked together to speed up development on error tracking by breaking the engineering process down into small steps and iterating as they go.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/tPTweQlBS54\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWe followed up with the Monitor:Health team to talk about how product and engineering worked together to develop an iterative strategy for making improvements to our error tracking product, both in terms of how our product team built the plan for error tracking and how engineering shipped the [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) (MVC) to production.\n\n## How we created a 
product strategy for error tracking\n\nError tracking is a process whereby application errors are identified and fixed as quickly as possible. The way error tracking functions at GitLab today is [through integration with Sentry](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), which aggregates errors, surfaces them in the GitLab UI, and provides the tools to triage and respond to the critical ones.\n\nToday, our error tracking feature is at the [minimal level of maturity](https://about.gitlab.com/direction/monitor/platform-insights/error_tracking/), meaning we still have plenty of work to do before this feature is viable.\n\n\"The goal was to be able to provide error tracking as a product and bring these processes closer to the development delivery workflow,\" said [Sarah Waldner](/company/team/#sarahwaldner), senior product manager on the Monitor:Health team.\n\nThe product team summarized what needs to be done to move [error tracking at GitLab from minimal to viable](https://gitlab.com/groups/gitlab-org/-/epics/1625) as part of a detailed [parent epic](https://docs.gitlab.com/ee/user/group/epics/#multi-level-child-epics). The parent epic essentially establishes product priorities by defining which use cases error tracking needs to solve in order for the product to be considered a viable feature. The next step was to define the core problems that users encounter with error tracking and double-check the solutions that should be used to solve those problems.\n\n\"Once we came up with these problems and validated those, we moved into a solution validation cycle whereby designers came up with different solutions and flows for these and then we tested them with different users,\" says Sarah. 
\"After we did all of that and have all of our solutions validated we broke it down into four different things that someone needs to do from a high level with Sentry.\"\n\nThose top four actions were divided into child epics which roll-up to the parent epic, and include:\n\n*   [The instrumentation or configuration of Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2036)\n*   [Correlating errors](https://gitlab.com/groups/gitlab-org/-/epics/2035)\n*   [Resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034)\n*   [Triaging errors](https://gitlab.com/groups/gitlab-org/-/epics/2029)\n\nBy breaking down the problems and establishing solutions, the team took an important step toward establishing their product development priorities. Contained in each of these child epics are other epics and issues which break down the solutions into the larger aspects.\n\n## Establishing development priorities\n\nThe team recognized that, in order to boost error tracking to viable, there needed to be a better way to resolve errors that are surfaced by Sentry within GitLab. The team created an epic for [resolving errors](https://gitlab.com/groups/gitlab-org/-/epics/2034), and outlined some of the key development priorities.\n\n\"So, to resolve errors, if you have an error that you need to fix, you might want to create an issue to track that work, respond to it, and close that issue in the general workflow,\" says Sarah. 
\"So within the resolving errors workflow part of the error tracking parent epic, we pose the idea of being able to manually open an issue from a Sentry error, which was then broken down further into where you do it from, and further again on the error detail page.\"\n\n![Resolve errors epic](https://about.gitlab.com/images/blogimages/resolve_errors_epic.png){: .shadow.medium.center}\nThe workflow for the resolve errors epic is broken down into multiple child epics, which correlate to different development projects.\n{: .note.text-center}\n\nThe team decided that we needed the ability to [create an issue within GitLab based on the errors detected by Sentry](https://gitlab.com/groups/gitlab-org/-/epics/2210) and that they wanted this function and button to appear on both the error list page as well as on the [error detail page](https://gitlab.com/groups/gitlab-org/-/epics/2210). The team then decided to make the error detail page the first priority.\n\n\"Through conversation, we were able to determine what is the bare minimum of value and broke it down as best as we could from frontend to backend, with the idea that it's better to ship something small that's not fully complete than (to ship) nothing at all,\" says [Clement Ho](/company/team/#ClemMakesApps), frontend engineering manager on Monitor:Health.\n\n## The \"Create an Issue\" button in three iterations\n\n\"Being able to open an issue from the error detail page seems really simple, but once you talk through what that workflow actually looks like, there are a lot more aspects to it than previously thought,\" says Sarah.\n\n![Open issue workflow](https://about.gitlab.com/images/blogimages/open_issue_epic.png){: .shadow.medium.center}\nBreaking the frontend and backend engineering into iterations shows just how much work needs to be done to ship even one minor component of the error tracking product.\n{: .note.text-center}\n\n### The \"Create an Issue\" button in stages\n\nClement was the architect behind the 
`Create an Issue` button frontend iterations. He explained that he wanted to take advantage of GitLab deploying frequently, and so he broke down the development process for the `Create an Issue` button into a series of small steps.\n\nThe [first iteration](https://gitlab.com/gitlab-org/gitlab/issues/36537) was simply to build the ability to create an issue from the error detail page. In this iteration, the `Create an Issue` button was simple and unstyled and clicking it led the user to a blank issue. While not overly helpful at this phase, it represents a good start in allowing someone to respond to an error.\n\n![Create an Issue button](https://about.gitlab.com/images/blogimages/create_an_issue_it1.png){: .shadow.medium.center}\nWhat the `Create an Issue` button will look like when it's done.\n{: .note.text-center}\n\nIn the [second iteration](https://gitlab.com/gitlab-org/gitlab/issues/36540), the user clicks `Create an Issue` and the issue comes pre-filled with the Sentry error title, description, and link. It’s still not styled and consistent with GitLab UI yet, but it’s possible to see more of the error context when creating an issue in response to the error.\n\nIn the [third iteration](https://gitlab.com/gitlab-org/gitlab/issues/36542), the GitLab UI gets cleaned up and the issue comes with proper formatting.\n\n\"Now, we are three issues into this and each one has been done in a couple of days and after the first couple of days, someone was able to create an issue,\" says Sarah. 
\"And that way we got the system much faster instead of first adding the button and then adding the experience of the new issue and then having all of the information in there styled.\"\n\n### Is it better to start with frontend or backend engineering?\n\nAs Christopher noted in his [conversation with Sid](https://www.youtube.com/watch?v=tPTweQlBS54), everything that Clement was working on in the first three iterations was frontend-focused; typically engineers start problem-solving from the backend.\n\n\"I love frontend first. I love interface first also because it helps everyone think about it,\" says [Sid in to Christopher regarding this project](https://www.youtube.com/watch?v=tPTweQlBS54). \"If you have something in the interface it's easier to understand for customers, for backend people, etc. So in the end what the customer sees is the product. One way to develop is to start with the readme or start with the press release. After that, the closest thing you can think of is the interface. So I think it's much better to have an interface built and then do the backend than vice versa. Even though I come from backend engineering.\"\n\nJust a few days after Clement started building the frontend of the `Create an Issue` button the backend team started building support in separate issues. The main priority was to build backend support that associates issues to errors so that users are not creating multiple issues for the same error. 
The engineers also built frontend support so the user can see that an issue was already created and linked to a particular error.\n\n## The power of iterative thinking\n\n\"One huge thing that came out of this is all team members now feel empowered to create issues and to just add them to the milestone and if they realize something is too big, they can create followups or second iterations,\" says Sarah.\n\nWhile the end goal is to build a viable error tracking product, the big vision simply cannot be achieved without smaller, incremental steps. While it is clear that the engineering teams embraced iteration, Sarah and the product team also recognized the strong strategic value of iterative product development.\n\nAt the same time, Clement wanted to take advantage of GitLab’s frequent deployments, but he also realized that by breaking down the engineering process into MVCs he could also drive up [merge request rate](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate) on the Monitor:Health frontend engineering team (the average number of merge requests per engineer merged per month) which is a [KPI](https://handbook.gitlab.com/handbook/engineering/development/performance-indicators/#mr-rate).\n\n![MR rate increases](https://about.gitlab.com/images/blogimages/mrs.png){: .shadow.medium.center}\nThe data shows an increase in the rate of merge requests on the Monitor:Health frontend engineering team.\n{: .note.text-center}\n\nThe data speaks for itself, since breaking down the product development process for error tracking into smaller iterations, the MR rate for Clement’s team has increased. 🎉\n\n## Scoping down to speed things up\n\nClement says that one of his key takeaways from this iterative development process was that GitLab ought to embrace iteration on the engineering side, but also iteration in product development. 
He is encouraging his team to ship MVCs more frequently, and plans to check his work by running through the process a few more times to iron out any wrinkles in the workflow.\n\nWhile the highly iterative approach to error tracking has been lauded by everyone from the senior director of development to our very own CEO, Clement acknowledges that this is still a work-in-progress.\n\n\"I think the cost is communication and information being spread out everywhere,\" Clement says.\n\nHe advises teams looking to adopt this highly iterative approach be extra disciplined at consolidating conversation on specific epics and issues within GitLab, otherwise, communication can get unwieldy, fast.\n\nCover photo by Max Ostrozhinskiy on Unsplash.\n{: .note}\n",[722,2141,743,9],{"slug":2666,"featured":6,"template":699},"iteration-on-error-tracking","content:en-us:blog:iteration-on-error-tracking.yml","Iteration On Error Tracking","en-us/blog/iteration-on-error-tracking.yml","en-us/blog/iteration-on-error-tracking",{"_path":2672,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2673,"content":2679,"config":2685,"_id":2687,"_type":13,"title":2688,"_source":15,"_file":2689,"_stem":2690,"_extension":18},"/en-us/blog/keeping-git-commit-history-clean",{"title":2674,"description":2675,"ogTitle":2674,"ogDescription":2675,"noIndex":6,"ogImage":2676,"ogUrl":2677,"ogSiteName":685,"ogType":686,"canonicalUrls":2677,"schema":2678},"How (and why!) to keep your Git commit history clean","Git commit history is very easy to mess up, here's how you can fix it!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659457/Blog/Hero%20Images/keep-git-commit-history-clean.jpg","https://about.gitlab.com/blog/keeping-git-commit-history-clean","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How (and why!) 
to keep your Git commit history clean\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kushal Pandya\"}],\n        \"datePublished\": \"2018-06-07\",\n      }",{"title":2674,"description":2675,"authors":2680,"heroImage":2676,"date":2682,"body":2683,"category":832,"tags":2684},[2681],"Kushal Pandya","2018-06-07","Git commits are one of the key parts of a [Git\nrepository](/solutions/source-code-management/), and more so,\nthe _commit message_ is a life log for the repository. As the\nproject/repository evolves over time (new features getting added, bugs being\nfixed, architecture being refactored), commit messages are the place where\none can see what was changed and how. So it's important that these messages\nreflect the underlying change in a short, precise manner.\n\n\n## Why a meaningful Git commit history is important\n\n\nWhat does Git commit do? Git commit messages are the fingerprints that you\nleave on the code you touch. Any code that you commit today, a year from now\nwhen you look at the same change; you would be thankful for a clear,\nmeaningful commit message that you wrote, and it will also make the lives of\nyour fellow developers easier. When Git commits are isolated based on\ncontext, a bug which was introduced by a single commit becomes quicker to\nfind, and the easier it is to revert the commit which caused the bug in the\nfirst place.\n\n\nWhile working on a large project, we often deal with a lot of moving parts\nthat are updated, added or removed. Ensuring that commit messages are\nmaintained in such cases could be tricky, especially when development spans\nacross days, weeks, or even months. 
So to simplify the effort of maintaining\nconcise commit history, this article will use some of the common situations\nthat a developer might face while working on a Git repository.\n\n\n- [Situation 1: I need to change the most recent\ncommit](#situation-1-i-need-to-change-the-most-recent-commit)\n\n- [Situation 2: I need to change a specific\ncommit](#situation-2-i-need-to-change-a-specific-commit)\n\n- [Situation 3: I need to add, remove, or combine\ncommits](#situation-3-i-need-to-add-remove-or-combine-commits)\n\n- [Situation 4: My commit history doesn't make sense, I need a fresh\nstart!](#situation-4-my-commit-history-doesnt-make-sense-i-need-a-fresh-start)\n\n\nBut before we dive in, let's quickly go through what a typical development\nworkflow looks like in our hypothetical Ruby application.\n\n\n**Note:** This article assumes that you are aware about basics of Git, how\nbranches work, how to add uncommitted changes of a branch to stage and how\nto commit the changes. If you're unsure of these flows, [our\ndocumentation](https://docs.gitlab.com/ee/topics/git/index.html) is a great\nstarting point.\n\n\n## A day in the life\n\n\nHere, we are working on a small Ruby on Rails project where we need to add a\nnavigation view on the homepage and that involves updating and adding\nseveral files. 
Following is a step by step breakdown of the entire flow:\n\n\n- You start working on a feature with updating a single file; let's call it\n`application_controller.rb`\n\n- This feature requires you to also update a view: `index.html.haml`\n\n- You added a partial which is used in index page: `_navigation.html.haml`\n\n- Styles for the page also need to be updated to reflect the partial we\nadded: `styles.css.scss`\n\n- Feature is now ready with the desired changes, time to also update tests;\nfiles to be updated are as follows:\n  - `application_controller_spec.rb`\n  - `navigation_spec.rb`\n- Tests are updated and passing as expected, now time to commit the changes!\n\n\nSince all the files belong to different territories of the architecture, we\ncommit the changes isolated of each other to ensure that each commit\nrepresents a certain context and is made in a certain order. I usually\nprefer backend -> frontend order where most backend-centric change is\ncommitted first, followed by the middle layer and then by frontend-centric\nchanges in the Git list commits.\n\n\n\n1.  `application_controller.rb` & `application_controller_spec.rb`; **Add\nroutes for navigation**.\n\n2.  `_navigation.html.haml` &  `navigation_spec.rb`; **Page Navigation\nView**.\n\n3.  `index.html.haml`; **Render navigation partial**.\n\n4.  `styles.css.scss`; **Add styles for navigation**.\n\n\nNow that we have our changes committed, we create a merge request with the\nbranch. Once you have merge request open, it typically gets reviewed by your\npeer before the changes are merged into repo's `master` branch. Now let's\nlearn what different situations we may end up with during code review.\n\n\n## Situation 1: How to change the most recent Git commit\n\n\nImagine a case where the reviewer looked at `styles.css.scss` and suggested\na change. In such a case, it is very simple to do the change as the\nstylesheet changes are part of **last** commit on your branch. 
Here's how we\ncan handle this;\n\n\n- You directly do the necessary changes to `styles.css.scss` in your current\nbranch.\n\n- Once you're done with the changes, add these changes to stage; run `git\nadd styles.css.scss`.\n\n- Once changes are staged, we need to _add_ these changes to our last\ncommit; run `git commit --amend`.\n  -  **Command breakdown**: Here, we're asking the `git commit` command to _amend_ whatever changes are present in stage to the most recent commit.\n- This will open your last commit in your Git-defined text editor which has\nthe commit message **Add styles for navigation**.\n\n- Since we only updated the CSS declaration, we don't need to alter the\ncommit message. At this point, you can just save and exit the text editor\nthat Git opened for you and your changes will be reflected in the commit.\n\n\nSince you modified an existing Git commit, these changes are required to be\n_force pushed_ to your remote repo using `git push --force-with-lease\n\u003Cremote_name> \u003Cbranch_name>`. This command will override the commit `Add\nstyles for navigation` on remote repo with updated commit that we just made\nin our local repo.\n\n\nOne thing to keep in mind while force pushing branches is that if you are\nworking on the same branch with multiple people, force pushing may cause\ntrouble for other users when they try to normally push their changes on a\nremote branch that has new commits force pushed. Hence, use this feature\nwisely. You can learn more about Git force push options\n[here](https://git-scm.com/docs/git-push#git-push---no-force-with-lease).\n\n\n## Situation 2: How to change a specific Git commit changes\n\n\nIn the previous situation, the Git commit change was rather simple as we had\nto modify only our last Git commit, but imagine if reviewer suggested to\nchange something in `_navigation.html.haml`. In this case, it is second\ncommit from the top, so changing it won't be as direct as it was in the\nfirst situation. 
Let's see how we can handle this:\n\n\nWhenever a commit is made in a branch, it is identified by a unique SHA-1\nhash string. Think of it as a unique ID that separates one commit from\nanother. You can view all the previous commits, along with their SHA-1\nhashes in a branch by running the `git log` command. With this, you would\nsee an output that looks somewhat as follows and is a list of commits, where\nthe most recent commits are at the top;\n\n\n```\n\ncommit aa0a35a867ed2094da60042062e8f3d6000e3952 (HEAD ->\nadd-page-navigation)\n\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\n\nDate: Wed May 2 15:24:02 2018 +0530\n\n    Add styles for navigation\n\ncommit c22a3fa0c5cdc175f2b8232b9704079d27c619d0\n\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\n\nDate: Wed May 2 08:42:52 2018 +0000\n\n    Render navigation partial\n\ncommit 4155df1cdc7be01c98b0773497ff65c22ba1549f\n\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\n\nDate: Wed May 2 08:42:51 2018 +0000\n\n    Page Navigation View\n\ncommit 8d74af102941aa0b51e1a35b8ad731284e4b5a20\n\nAuthor: Kushal Pandya \u003Ckushal@gitlab.com>\n\nDate: Wed May 2 08:12:20 2018 +0000\n\n    Add routes for navigation\n```\n\n\nThis is where `git rebase` command comes into play. Whenever we wish to edit\na specific commit with `git rebase`, we need to first rebase our branch by\nmoving back HEAD to the point right _before_ the commit we wish to edit. 
In\nour case, we need to change the commit that reads `Page Navigation View`.\n\n\n![Commit\nLog](https://about.gitlab.com/images/blogimages/keeping-git-commit-history-clean/GitRebase.png){:\n.shadow.center.medium}\n\n\nHere, notice the hash of commit which is right before the commit we want to\nmodify; copy the hash and perform the following steps:\n\n\n- Rebase the branch to move to commit before our target commit; run `git\nrebase -i 8d74af102941aa0b51e1a35b8ad731284e4b5a20`\n  -  **Git command breakdown**: Here we're running Git's `rebase` command with _interactive_ mode with provided SHA-1 hash as commit to rebase to.\n- This will run rebase command for Git in interactive mode and will open\nyour text editor showing all of your commits that came _after_ the commit\nyou rebased to. It will look somewhat like this:\n\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick c22a3fa0c5c Render navigation partial\n\npick aa0a35a867e Add styles for navigation\n\n\n# Rebase 8d74af10294..aa0a35a867e onto 8d74af10294 (3 commands)\n\n#\n\n# Commands:\n\n# p, pick = use commit\n\n# r, reword = use commit, but edit the commit message\n\n# e, edit = use commit, but stop for amending\n\n# s, squash = use commit, but meld into previous commit\n\n# f, fixup = like \"squash\", but discard this commit's log message\n\n# x, exec = run command (the rest of the line) using shell\n\n# d, drop = remove Git commit\n\n#\n\n# These lines can be re-ordered; they are executed from top to bottom.\n\n#\n\n# If you remove a line here THAT COMMIT WILL BE LOST.\n\n#\n\n# However, if you remove everything, the rebase will be aborted.\n\n#\n\n# Note that empty commits are commented out\n\n```\n\n\nNotice how each commit has a word `pick` in front of it, and in the contents\nbelow, there are all possible keywords we can use. Since we want to _edit_ a\ncommit, we need to change `pick 4155df1cdc7 Page Navigation View` to `edit\n4155df1cdc7 Page Navigation View`. 
Save the changes and exit editor.\n\n\nNow your branch is rebased to the point in time right before the commit you\nmade which included `_navigation.html.haml`. Open the file and perform\ndesired changes as per the review feedback. Once you're done with the\nchanges, stage them by running `git add _navigation.html.haml`.\n\n\nSince we have staged the changes, it is time to move branch HEAD back to the\ncommit we originally had (while also including the new changes we added),\nrun `git rebase --continue`, this will open your default editor in the\nterminal and show you the commit message that we edited during rebase; `Page\nNavigation View`. You can change this message if you wish, but we would\nleave it as it is for now, so save and exit the editor. At this point, Git\nwill replay all the commits that followed after the commit you just edited\nand now branch `HEAD` is back to the top commit we originally had, and it\nalso includes the new changes you made to one of the commits.\n\n\nSince we again modified a commit that's already present in remote repo, we\nneed to force push this branch again using `git push --force-with-lease\n\u003Cremote_name> \u003Cbranch_name>`.\n\n\n## Situation 3: How to add, remove, or combine Git commits\n\n\nA common situation is when you've made several commits just to fix something\npreviously committed. Now let's reduce them as much as we can, combining\nthem with the original commits.\n\n\nAll you need to do is start the interactive rebase as you would in the other\nscenarios.\n\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick c22a3fa0c5c Render navigation partial\n\npick aa0a35a867e Add styles for navigation\n\npick 62e858a322 Fix a typo\n\npick 5c25eb48c8 Ops another fix\n\npick 7f0718efe9 Fix 2\n\npick f0ffc19ef7 Argh Another fix!\n\n```\n\n\nNow imagine you want to combine all those fixes into `c22a3fa0c5c Render\nnavigation partial`. You just need to:\n\n\n1. 
Move the fixes up so that they are right below the commit you want to\nkeep in the end.\n\n2. Change `pick` to `squash` or `fixup` for each of the fixes.\n\n\n*Note:* `squash` keeps the git fix commit messages in the description.\n`fixup` will forget the commit messages of the fixes and keep the original.\n\n\nYou'll end up with something like this:\n\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick c22a3fa0c5c Render navigation partial\n\nfixup 62e858a322 Fix a typo\n\nfixup 5c25eb48c8 Ops another fix\n\nfixup 7f0718efe9 Fix 2\n\nfixup f0ffc19ef7 Argh Another fix!\n\npick aa0a35a867e Add styles for navigation\n\n```\n\n\nSave the changes, exit the editor, and you're done! This is the resulting\nhistory:\n\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick 96373c0bcf Render navigation partial\n\npick aa0a35a867e Add styles for navigation\n\n```\n\n\nAs before, all you need to do now is `git push --force-with-lease\n\u003Cremote_name> \u003Cbranch_name>` and the changes are up.\n\n\nIf you want to remove a Git commit from branch altogether, instead of\n`squash` or `fixup`, just write `drop` or simply delete that line.\n\n\n### How to avoid Git commit conflicts\n\n\nTo avoid conflicts, make sure the commits you're moving up the timeline\naren't touching the same files touched by the commits left after them.\n\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick c22a3fa0c5c Render navigation partial\n\nfixup 62e858a322 Fix a typo                 # this changes styles.css\n\nfixup 5c25eb48c8 Ops another fix            # this changes image/logo.svg\n\nfixup 7f0718efe9 Fix 2                      # this changes styles.css\n\nfixup f0ffc19ef7 Argh Another fix!          
# this changes styles.css\n\npick aa0a35a867e Add styles for navigation  # this changes index.html (no\nconflict)\n\n```\n\n\n### Pro-tip: Quick Git commit `fixup`s\n\n\nIf you know exactly which commit you want to fixup, when committing you\ndon't have to waste brain cycles thinking of good temporary names for \"Fix\n1\", \"Fix 2\", ..., \"Fix 42\".\n\n\n**Step 1: Meet `--fixup`**\n\n\nAfter you've staged the changes fixing whatever it is that needs fixing,\njust Git commit all the changes like this:\n\n\n```\n\ngit commit --fixup c22a3fa0c5c\n\n```\n\n(Note that this is the hash for the commit `c22a3fa0c5c Render navigation\npartial`)\n\n\nThis will generate this commit message: `fixup! Render navigation partial`.\n\n\n**Step 2: And the sidekick `--autosquash`**\n\n\nEasy interactive rebase. You can have `git` place the `fixup`s automatically\nin the right place.\n\n\n`git rebase -i 4155df1cdc7 --autosquash`\n\n\nHistory will be shown like so:\n\n```\n\npick 4155df1cdc7 Page Navigation View\n\npick c22a3fa0c5c Render navigation partial\n\nfixup 62e858a322 Fix a typo\n\nfixup 5c25eb48c8 Ops another fix\n\nfixup 7f0718efe9 Fix 2\n\nfixup f0ffc19ef7 Argh Another fix!\n\npick aa0a35a867e Add styles for navigation\n\n```\n\n\nReady for you to just review and proceed.\n\n\nIf you're feeling adventurous you can do a non-interactive rebase `git\nrebase --autosquash`, but only if you like living dangerously, as you'll\nhave no opportunity to review the squashes being made before they're\napplied.\n\n\n## Situation 4: My Git commit history doesn't make sense, I need a fresh\nstart!\n\n\nIf we're working on a large feature, it is common to have several fixup and\nreview-feedback changes that are being committed frequently. Instead of\nconstantly rebasing the branch, we can leave the cleaning up of Git commits\nuntil the end of development.\n\n\nThis is where creating patch files is extremely handy. 
In fact, patch files\nwere the primary way of sharing code over email while collaborating on large\nopen source projects before Git-based services like GitLab were available to\ndevelopers. Imagine you have one such branch (eg; `add-page-navigation`)\nwhere there are tons of commits that don't convey the underlying changes\nclearly. Here's how you can create a patch file for all the changes you made\nin this branch:\n\n\n- The first step to create the patch file is to make sure that your branch\nhas all the changes present from `master` branch and has no conflicts with\nthe same.\n\n- You can run `git rebase master` or `git merge master` while you're checked\nout in `add-page-navigation` branch to get all the changes from `master` on\nto your branch.\n\n- Now create the patch file; run `git diff master add-page-navigation >\n~/add_page_navigation.patch`.\n  -  **Command breakdown**: Here we're using Git's _diff_ feature, and asking for a diff between `master` branch and `add-page-navigation` branch, and _redirecting_ the output (via `>` symbol) to a file named `add_page_navigation.patch` in our user home directory (typically `~/` in *nix operating systems).\n- You can specify any path you wish to keep this file in and the file name\nand extension could be anything you want.\n\n- Once the command is run and you don't see any errors, the patch file is\ngenerated.\n\n- Now checkout `master` branch; run `git checkout master`.\n\n- Delete the branch `add-page-navigation` from local repo; run `git branch\n-D add-page-navigation`. 
Remember, we already have changes of this branch in\na created patch file.\n\n- Now create a new branch with the same name (while `master` is checked\nout); run `git checkout -b add-page-navigation`.\n\n- At this point, this is a fresh branch and doesn't have any of your\nchanges.\n\n- Finally, apply your changes from the patch file; `git apply\n~/add_page_navigation.patch`.\n\n- Here, all of your changes are applied in a branch and they will appear as\nuncommitted, as if all your modifications were done, but none of the\nmodifications were actually committed in the branch.\n\n- Now you can go ahead and commit individual files or files grouped by area\nof impact in the order you want with concise commit messages.\n\n\nAs with previous situations, we basically modified the whole branch, so it\nis time to force push!\n\n\n## Git commit history: Conclusion\n\n\nWhile we have covered most common and basic situations that arise in a\nday-to-day workflow with Git, rewriting Git history is a vast topic and as\nyou get familiar with the above tips, you can learn more advanced concepts\naround the subject in the [Git Official\nDocumentation](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).\nHappy git'ing!\n\n\nPhoto by [pan\nxiaozhen](https://unsplash.com/photos/pj-BrFZ9eAA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/search/photos/clean?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n",[1684,9],{"slug":2686,"featured":6,"template":699},"keeping-git-commit-history-clean","content:en-us:blog:keeping-git-commit-history-clean.yml","Keeping Git Commit History 
Clean","en-us/blog/keeping-git-commit-history-clean.yml","en-us/blog/keeping-git-commit-history-clean",{"_path":2692,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2693,"content":2699,"config":2705,"_id":2707,"_type":13,"title":2708,"_source":15,"_file":2709,"_stem":2710,"_extension":18},"/en-us/blog/keys-to-success-for-product-operations",{"title":2694,"description":2695,"ogTitle":2694,"ogDescription":2695,"noIndex":6,"ogImage":2696,"ogUrl":2697,"ogSiteName":685,"ogType":686,"canonicalUrls":2697,"schema":2698},"3 keys to success for product operations","Learn how to set a foundation for product operations at your organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682313/Blog/Hero%20Images/prodops-keys-elena-mozhvilo-Lp9uH9s9fss-unsplash.jpg","https://about.gitlab.com/blog/keys-to-success-for-product-operations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 keys to success for product operations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Farnoosh Seifoddini\"}],\n        \"datePublished\": \"2022-05-24\",\n      }",{"title":2694,"description":2695,"authors":2700,"heroImage":2696,"date":2702,"body":2703,"category":811,"tags":2704},[2701],"Farnoosh Seifoddini","2022-05-24","\n\nIt is official. Product operations is a thing. A quick Google search will pull up a long list of articles singing the praises of everything product operations has to offer, from making product managers more efficient to data collection and synthesis. \n\nWhen I first took on [product operations at GitLab](/direction/product-operations/), there wasn’t a lot of definition or guidance on the topic. I understood what product operations meant because I’d been “doing it” as an inseparable part of my product management and product leadership roles for some years. 
But I’d never had the opportunity to focus solely on product operations.\n\nAs excited as I was, I was also nervous. GitLab was [accelerating toward an IPO](/blog/gitlab-inc-takes-the-devops-platform-public/) and both the product management team and the product were in hyper growth mode. And, to boot, the all-remote, cross-functional teams were in motion, sync and async, day and night, all around the globe. So, I reached out to peers who had already started their product operations journey and leveraged the perspective, progress, and learnings they generously shared. And, in doing so, I realized everyone was doing it a bit differently. \n\nNow, two and a half years later, product operations is a thing at GitLab. And the most common question I get from peers reaching out to me is: How can I set up product operations for success at my organization? \n\nTo answer this question, I will assume we all want to be product-led and customer-centered, and “success” would be product operations helping us get there. I’ll also assume we agree with the sentiment that’s evolved [defining product operations responsibility](https://www.pendo.io/glossary/product-operations/) to fall into these core areas: tools, data, experimentation, strategy, and trusted advisor. \n\nWhile there is no one formula, I will share three keys that opened doors for product operations to make an impact and grow with GitLab.\n\n### 1. Empower product operations as its own function, with an equal seat alongside other value-driving functions\n\nAt GitLab, we run product operations as an independent function under the product umbrella. The direct line of responsibility to the head of all product ensures product operations has awareness, alignment, and accountability to the macro needs of the product and the business. 
This also allows product operations to maintain a broad and unbiased view, as well as the right level of influence, to develop strategies/tactics serving the product and the business without favor toward any particular group. This [Silicon Valley Product Group article](https://www.svpg.com/product-ops-overview/) by Marty Cagan provides more helpful context on the why of this approach. \n\n### 2. Make product operations a people-first operation\n\nBefore product operations can deliver on efficiencies and tools that are useful for the product and the business, product operations must understand all of its internal customers. The first year product operations took shape at GitLab, much of my energy was focused on building relationships, not only with product team members but across the whole organization. Becoming a trusted advisor runs deeper than just delivering data, it’s about sensing pain and building bridges. A product operations team that leads with empathy will elevate the organization rather than just serve the organization. \n\n### 3. Drive adoption of product operations strategies by providing opportunities for team ownership\n\nAt GitLab, [everyone can contribute](/company/mission/#everyone-can-contribute). Leveraging this mindset for product operations led to [more impactful and better-designed iterations](https://handbook.gitlab.com/handbook/values/#iteration) to the problems we were trying to solve. By collaborating with various team members across the organization to improve and implement the shared frameworks in the product system, we not only ensure better multi-dimensional solutions but also boost alignment and acceptance of the solutions as well. This approach also inspires team ownership of flexible workflows rather than a perception that product operations is the “enforcer” of rigid processes. \n\nThese three keys become more challenging to forge if they aren’t introduced to an organization early on. 
Even if not immediately feasible, it’s helpful to carve space for the philosophy upfront and start small to demonstrate the value of the approach as you build the foundation for product operations. In future posts, I will share strategies and tactics for each of these keys as well as answer the second most common question I get: What is a “product system”? \n\nIn the meantime, feel free to learn more about [what product operations drives](/direction/product-operations/) at GitLab and the product management resources we maintain in our [Product Handbook](/handbook/product/).\n\n\n\nCover image by [Elena Mozhvilo](https://unsplash.com/@miracleday?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[696,790,9,1241],{"slug":2706,"featured":6,"template":699},"keys-to-success-for-product-operations","content:en-us:blog:keys-to-success-for-product-operations.yml","Keys To Success For Product Operations","en-us/blog/keys-to-success-for-product-operations.yml","en-us/blog/keys-to-success-for-product-operations",{"_path":2712,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2713,"content":2719,"config":2725,"_id":2727,"_type":13,"title":2728,"_source":15,"_file":2729,"_stem":2730,"_extension":18},"/en-us/blog/kingfisher-transforming-the-developer-experience-with-gitlab",{"title":2714,"description":2715,"ogTitle":2714,"ogDescription":2715,"noIndex":6,"ogImage":2716,"ogUrl":2717,"ogSiteName":685,"ogType":686,"canonicalUrls":2717,"schema":2718},"Kingfisher transforming the developer experience with GitLab","Learn how the international company focuses on DevSecOps, including automation, to reduce complexity in workflows for better efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","https://about.gitlab.com/blog/kingfisher-transforming-the-developer-experience-with-gitlab","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kingfisher transforming the developer experience with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2024-11-12\",\n      }",{"title":2714,"description":2715,"authors":2720,"heroImage":2716,"date":2721,"body":2722,"category":2723,"tags":2724},[690],"2024-11-12","Kingfisher plc, an international home improvement company, has leaned into GitLab’s end-to-end platform to help it build a DevSecOps foundation that is revolutionizing its developer experience. And the company plans to continue that improvement by increasing its use of platform features, focusing on security, simplifying its toolchain, and increasing the use of automation.\n\n> \u003Cimg align=\"left\" width=\"200\" height=\"200\" hspace=\"5\" vspace=\"5\" alt=\"Chintan Parmar\" src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176076/Blog/ro7u8p695zw9fllbk4j5.png\" style=\"float: left; margin-right: 25px;\"> “The whole point of this is to reduce friction for our engineers, taking away a lot of the complexity in their workflow, and bringing in best practices and governance,” says Chintan Parmar, site reliability engineering manager at Kingfisher. “In terms of what we've done and what we're doing at the moment, it really is about building a foundation in terms of CI/CD and changing the way we deploy to bring in consistency and improve the developer experience.”\n\nParmar talked about his team and their efforts during the [GitLab DevSecOps World Tour event](https://about.gitlab.com/events/devsecops-world-tour/) in London last month. 
In an on-stage interview with Sherrod Patching, vice president of Customer Success Management at GitLab, he laid out Kingfisher’s journey with the platform, which is enabling its teams, while also making it easier and faster to move software updates and new projects from ideation to deployment.\n\n[Kingfisher](https://www.kingfisher.com/en/index.html) is a parent company with more than 2,000 stores in eight countries across Europe. Listed on the London Stock Exchange and part of the Financial Times Stock Exchange (FTSE) 100 Index, the group reported £13 billion in total revenue in FY 2023/24. Its brands include B&Q, Screwfix, Castorama, and Brico Depot. \n\nThe company first adopted GitLab in 2016, using a free starter license, and then moved to Premium in 2020. In that time, it also has moved from on-premise to a cloud environment, started using shared GitLab runners and source code management, and began building out a CI/CD library that gives team members easy access to standardized and reusable components for typical pipeline stages, such as build, deploy, and test.\n\n## Tracking metrics that execs care about\n\nKingfisher also is tracking metrics, like deployment frequency, lead time to change, and change failure rates, with GitLab. And teams are analyzing value streams, mapping workflows, and finding bottlenecks. All of those metrics are being translated into data that company leaders can sink their teeth into. \n\n“Execs may not care about whether a merge request has been waiting 15 or 20 minutes, but they do care about how we translate that time value into dollars or pounds,” says Parmar, who used GitLab when he previously worked at [Dunelm Group, plc,](https://about.gitlab.com/customers/dunelm/) another major UK-based retailer. “Kingfisher is a very data-driven organization. 
We are looking to overlay these metrics to see where we can continue to improve our developer experience, eliminating slowdowns and manual tasks, while increasing automation.”\n\nWhile on-stage, Parmar made it clear that all the changes being made are aimed at improving software development and deployment. However, it’s equally paramount to making team members’ jobs easier, giving them more time and autonomy to do the kind of work they enjoy, instead of what can seem like a never-ending stream of repetitive, manual tasks. He noted that the team is so focused on easing workflows and giving engineers more time to be innovative, it has created a “developer experience squad.”\n\n## Putting people first while laying out priorities\n\nSo what’s coming next for Kingfisher and its engineering squads, which have about 600 practitioners?  \n\nAccording to Parmar, Kingfisher already has its priorities mapped out. Using GitLab to [move security left](https://about.gitlab.com/solutions/security-compliance/) is at the top of their list. The group also is focused on continuing to reduce its toolchain, and using automation to increase productivity. And he expects that early in 2025, teams will begin “dabbling” with the artificial intelligence capabilities in [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a suite of AI-powered features in the platform that help increase velocity and solve key pain points across the software development lifecycle. Kingfisher will focus on how that can further increase its efficiency and productivity. \n\nTo get all of this done, Parmar says the first step is to ensure that people come first.\n\n“We’re focused on the hearts and minds of our people... and remembering that people can be attached to how they work through pipelines,” he adds. “People have different ways of building their pipelines. We need to understand what they need, what their workflows look like, and then work with them to find the right solution. 
After, we’ll go back to them with data that shows the improvements worked. So instead of telling them what they need, we find out what that is, and fix what’s slowing them down. That builds a very good rapport with our engineers.”\n\nChanging how a team creates and deploys software is a journey. Parmar suggests that collaboratively taking developers and security teams on that journey, instead of dragging them along, makes a big difference in ease of migration and in easing team members’ user experience.\n\n> Learn [how other GitLab customers use the DevSecOps platform](https://about.gitlab.com/customers/) to gain results for customers.\n","customer-stories",[1096,495,1159,9],{"slug":2726,"featured":90,"template":699},"kingfisher-transforming-the-developer-experience-with-gitlab","content:en-us:blog:kingfisher-transforming-the-developer-experience-with-gitlab.yml","Kingfisher Transforming The Developer Experience With Gitlab","en-us/blog/kingfisher-transforming-the-developer-experience-with-gitlab.yml","en-us/blog/kingfisher-transforming-the-developer-experience-with-gitlab",{"_path":2732,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2733,"content":2739,"config":2745,"_id":2747,"_type":13,"title":2748,"_source":15,"_file":2749,"_stem":2750,"_extension":18},"/en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"title":2734,"description":2735,"ogTitle":2734,"ogDescription":2735,"noIndex":6,"ogImage":2736,"ogUrl":2737,"ogSiteName":685,"ogType":686,"canonicalUrls":2737,"schema":2738},"Learn advanced Rust programming with a little help from AI","Use this guided tutorial, along with AI-powered GitLab Duo Code Suggestions, to continue learning advanced Rust programming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662439/Blog/Hero%20Images/codewithheart.png","https://about.gitlab.com/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn advanced Rust programming with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-10-12\",\n      }",{"title":2734,"description":2735,"authors":2740,"heroImage":2736,"date":2741,"body":2742,"category":764,"tags":2743},[2491],"2023-10-12","When I started learning a new programming language more than 20 years ago,\nwe had access to the Visual Studio 6 MSDN library, installed from 6 CD-ROMs.\nAlgorithms with pen and paper, design pattern books, and MSDN queries to\nfigure out the correct type were often time-consuming. Learning a new\nprogramming language changed fundamentally in the era of remote\ncollaboration and artificial intelligence (AI). Now you can spin up a\n[remote development\nworkspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/),\nshare your screen, and engage in a group programming session. With the help\nof [GitLab Duo Code Suggestions](/gitlab-duo/), you always have an\nintelligent partner at your fingertips. Code Suggestions can learn from your\nprogramming style and experience. 
They only need input and context to\nprovide you with the most efficient suggestions.\n\n\nIn this tutorial, we build on the [getting started blog\npost](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/)\nand design and create a simple feed reader application.\n\n\n- [Preparations](#preparations)\n    - [Code Suggestions](#code-suggestions)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Hello, Reader App](#hello-reader-app)\n    - [Initialize project](#initialize-project)\n    - [Define RSS feed URLs](#define-rss-feed-urls)\n- [Modules](#modules)\n    - [Call the module function in main()](#call-the-module-function-in-main)\n- [Crates](#crates)\n    - [feed-rs: parse XML feed](#feed-rs-parse-xml-feed)\n- [Runtime configuration: Program\narguments](#runtime-configuration-program-arguments)\n    - [User input error handling](#user-input-error-handling)\n- [Persistence and data storage](#persistence-and-data-storage)\n\n- [Optimization](#optimization)\n    - [Asynchronous execution](#asynchronous-execution)\n    - [Spawning threads](#spawning-threads)\n    - [Function scopes, threads, and closures](#function-scopes-threads-and-closures)\n- [Parse feed XML into objects](#parse-feed-xml-into-object-types)\n    - [Map generic feed data types](#map-generic-feed-data-types)\n    - [Error handling with Option::unwrap()](#error-handling-with-option-unwrap)\n- [Benchmarks](#benchmarks)\n    - [Sequential vs. 
Parallel execution benchmark](#sequential-vs-parallel-execution-benchmark)\n    - [CI/CD with Rust caching](#cicd-with-rust-caching)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations\n\nBefore diving into the source code, make sure to set up [VS\nCode](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#vs-code)\nand [your development environment with\nRust](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#development-environment-for-rust).\n\n\n### Code Suggestions\n\nFamiliarize yourself with suggestions before actually verifying the\nsuggestions. GitLab Duo Code Suggestions are provided as you type, so you do\nnot need use specific keyboard shortcuts. To accept a code suggestion, press\nthe `tab` key. Also note that writing new code works more reliably than\nrefactoring existing code. AI is non-deterministic, which means that the\nsame suggestion may not be repeated after deleting the code suggestion.\nWhile Code Suggestions is in Beta, we are working on improving the accuracy\nof generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience.\n\n\n**Tip:** The latest release of Code Suggestions supports multi-line\ninstructions. You can refine the specifications to your needs to get better\nsuggestions.\n\n\n```rust\n    // Create a function that iterates over the source array\n    // and fetches the data using HTTP from the RSS feed items.\n    // Store the results in a new hash map.\n    // Print the hash map to the terminal.\n```\n\n\nThe VS Code extension overlay is shown when offering a suggestion. You can\nuse the `tab` key to accept the suggested line(s), or `cmd cursor right` to\naccept one word. 
Additionally, the three dots menu allows you to always show\nthe toolbar.\n\n\n![VS Code GitLab Duo Code Suggestions overlay with\ninstructions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_code_suggestions_options_overlay_keep_toolbar.png){:\n.shadow}\n\n\n## Continue learning Rust\n\nNow, let us continue learning Rust, which is one of the [supported languages\nin Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages).\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides an\nexcellent tutorial for beginners, together with the official [Rust\nbook](https://doc.rust-lang.org/book/). Both resources are referenced\nthroughout this blog post.\n\n\n### Hello, Reader App\n\nThere are many ways to create an application and learn Rust. Some of them\ninvolve using existing Rust libraries - so-called `Crates`. We will use them\na bit further into the blog post. For example, you could create a\ncommand-line app that processes images and writes the result to a file.\nSolving a classic maze or writing a Sudoku solver can also be a fun\nchallenge. Game development is another option. The book [Hands-on\nRust](https://hands-on-rust.com/) provides a thorough learning path by\ncreating a dungeon crawler game. My colleague Fatima Sarah Khalid started\nthe [Dragon Realm in C++ with a little help from\nAI](/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)\n-- check it out, too.\n\n\nHere is a real use case that helps solve an actual problem: Collecting\nimportant information from different sources into RSS feeds for (security)\nreleases, blog posts, and social discussion forums like Hacker News. Often,\nwe want to filter for specific keywords or versions mentioned in the\nupdates. These requirements allow us to formulate a requirements list for\nour application:\n\n\n1. 
Fetch data from different sources (HTTP websites, REST API, RSS feeds).\nRSS feeds in the first iteration.\n\n1. Parse the data.\n\n1. Present the data to the user, or write it to disk.\n\n1. Optimize performance.\n\n\nThe following example application output will be available after the\nlearning steps in this blog post:\n\n\n![VS Code Terminal, cargo run with formatted feed entries\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n\nThe application should be modular and build the foundation to add more data\ntypes, filters, and hooks to trigger actions at a later point.\n\n\n### Initialize project\n\nReminder: `cargo init` in the project root creates the file structure,\nincluding the `main()` entrypoint. Therefore, we will learn how to create\nand use Rust modules in the next step.\n\n\nCreate a new directory called `learn-rust-ai-app-reader`, change into it and\nrun `cargo init`. This command implicitly runs `git init` to initialize a\nnew Git repository locally. The remaining step is to configure the Git\nremote repository path, for example,\n`https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader`.\nPlease adjust the path for your namespace. Pushing the Git repository\n[automatically creates a new private project in\nGitLab](https://docs.gitlab.com/ee/user/project/#create-a-new-project-with-git-push).\n\n\n```shell\n\nmkdir learn-rust-ai-app-reader\n\ncd learn-rust-ai-app-reader\n\n\ncargo init\n\n\ngit remote add origin\nhttps://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader.git\n\ngit push --set-upstream origin main\n\n```\n\n\nOpen VS Code from the newly created directory. 
The `code` CLI will spawn a\nnew VS Code window on macOS.\n\n\n```shell\n\ncode .\n\n```\n\n\n### Define RSS feed URLs\n\nAdd a new hashmap to store the RSS feed URLs inside the `src/main.rs` file\nin the `main()` function. You can instruct GitLab Duo Code Suggestions with\na multi-line comment to create a\n[`HashMap`](https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html)\nobject, and initialize it with default values for Hacker News, and\nTechCrunch. Note: Verify that the URLs are correct when you get suggestions.\n\n\n```rust\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n\n}\n\n```\n\n\nNote that the code comment provides instructions for:\n\n\n1. The variable name `rss_feeds`.\n\n2. The `HashMap` type.\n\n3. Initial seed key/value pairs.\n\n4. String as type (can be seen with `to_string()` calls).\n\n\nOne possible suggested path can be as follows:\n\n\n```rust\n\nuse std::collections::HashMap;\n\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n}\n\n```\n\n\n![VS Code with Code Suggestions for RSS feed URLs for Hacker News and\nTechCrunch](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_main_array_rss_feed_urls_suggested.png)\n\n\nOpen a new terminal in VS Code (cmd shift p - search for `terminal`), and\nrun `cargo build` to build the changes. The error message instructs you to\nadd the `use std::collections::HashMap;` import.\n\n\nThe next step is to do something with the RSS feed URLs. 
[The previous blog\npost](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/)\ntaught us to split code into functions. We want to organize the code more\nmodularly for our reader application, and use Rust modules.\n\n\n## Modules\n\n[Modules](https://doc.rust-lang.org/rust-by-example/mod.html) help with\norganizing code. They can also be used to hide functions into the module\nscope, limiting access to them from the main() scope. In our reader\napplication, we want to fetch the RSS feed content, and parse the XML\nresponse. The `main()` caller should only be able to access the\n`get_feeds()` function, while other functionality is only available in the\nmodule.\n\n\nCreate a new file `feed_reader.rs` in the `src/` directory. Instruct Code\nSuggestions to create a public module named `feed_reader`, and a public\nfunction `get_feeds()` with a String HashMap as input. Important: The file\nand module names need to be the same, following the [Rust module\nstructure](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).\n\n\n![Code Suggestions: Create public module, with function and input\ntypes](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){:\n.shadow}\n\n\nInstructing Code Suggestions with the input variable name and type will also\nimport the required `std::collections::HashMap` module. 
Tip: Experiment with\nthe comments, and refine the variable types to land the best results.\nPassing function parameters as object references is considered best practice\nin Rust, for example.\n\n\n```rust\n\n// Create public module feed_reader\n\n// Define get_feeds() function which takes rss_feeds as String HashMap\nreference as input\n\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Do something with the RSS feeds\n    }\n}\n\n```\n\n\n![Code Suggestions: Public module with `get_feeds()` function, and suggested\ninput\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){:\n.shadow}\n\n\nInside the function, continue to instruct Code Suggestions with the\nfollowing steps:\n\n\n1. `// Iterate over the RSS feed URLs`\n\n2. `// Fetch URL content`\n\n3. `// Parse XML body`\n\n4. `// Print the result`\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 1:\nIterate](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_01_iterate.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 2: Fetch\nURL\ncontent](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_02_fetch_content.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 3: Parse\nXML\nbody](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_03_parse_body.png){:\n.shadow}\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 4: 
Print\nthe\nresults](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_04_print_result.png){:\n.shadow}\n\n\nThe following code can be suggested:\n\n\n```rust\n\n// Create public module feed_reader\n\n// Define get_feeds() function which takes rss_feeds as String HashMap\nreference as input\n\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Iterate over the RSS feed URLs\n        for (name, url) in rss_feeds {\n            println!(\"{}: {}\", name, url);\n\n            // Fetch URL content\n            let body = reqwest::blocking::get(url).unwrap().text().unwrap();\n\n            // Parse XML body\n            let parsed_body = roxmltree::Document::parse(&body).unwrap();\n\n            // Print the result\n            println!(\"{:#?}\", parsed_body);\n        }\n    }\n}\n\n```\n\n\nYou see a new keyword here:\n[`unwrap()`](https://doc.rust-lang.org/rust-by-example/error/option_unwrap.html).\nRust does not support `null` values, and uses the [`Option`\ntype](https://doc.rust-lang.org/rust-by-example/std/option.html) for any\nvalue. If you are certain to use a specific wrapped type, for example,\n`Text` or `String`, you can call the `unwrap()` method to get the value. The\n`unwrap()` method will panic if the value is `None`.\n\n\n**Note** Code Suggestions referred to the `reqwest::blocking::get` function\nfor the `// Fetch URL content` comment instruction. The [`reqwest`\ncrate](https://docs.rs/reqwest/latest/reqwest/) name is intentional and not\na typo. It provides a convenient, higher-level HTTP client for async and\nblocking requests.\n\n\nParsing the XML body is tricky - you might get different results, and the\nschema is not the same for every RSS feed URL. 
Let us try to call the\n`get_feeds()` function, and then work on improving the code.\n\n\n### Call the module function in main()\n\n\nThe main() function does not know about the `get_feeds()` function yet, so\nwe need to import its module. In other programming languages, you might have\nseen the keywords `include` or `import`. The Rust module system is\ndifferent.\n\n\nModules are organized in path directories. In our example, both source files\nexist on the same directory level. `feed_reader.rs` is interpreted as crate,\ncontaining one module called `feed_reader`, which defines the function\n`get_feeds()`.\n\n\n```\n\nsrc/\n  main.rs\n  feed_reader.rs\n```\n\n\nIn order to access `get_feeds()` from the `feed_reader.rs` file, we need to\n[bring module\npath](https://doc.rust-lang.org/book/ch07-04-bringing-paths-into-scope-with-the-use-keyword.html)\ninto the `main.rs` scope first, and then call the full function path.\n\n\n```rust\n\nmod feed_reader;\n\n\nfn main() {\n\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n\n```\n\n\nAlternatively, we can import the full function path with the `use` keyword,\nand later use the short function name.\n\n\n```rust\n\nmod feed_reader;\n\nuse feed_reader::feed_reader::get_feeds;\n\n\nfn main() {\n\n    get_feeds(&rss_feeds);\n\n```\n\n\n**Tip:** I highly recommend reading the [Clear explanation of the Rust\nmodule system blog\npost](https://www.sheshbabu.com/posts/rust-module-system/) to get a better\nvisual understanding.\n\n\n```diff\n\n\nfn main() {\n    // ...\n\n    // Print feed_reader get_feeds() output\n    println!(\"{}\", feed_reader::get_feeds(&rss_feeds));\n```\n\n\n```rust\n\nuse std::collections::HashMap;\n\n\nmod feed_reader;\n\n// Alternative: Import full function path\n\n//use feed_reader::feed_reader::get_feeds;\n\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = 
HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n\n```\n\n\nRun `cargo build` in the terminal again to build the code.\n\n\n```shell\n\ncargo build\n\n```\n\n\nPotential build errors when Code Suggestions refer to common code and\nlibraries for HTTP requests, and XML parsing:\n\n\n1. Error: `could not find blocking in reqwest`. Solution: Enable the\n`blocking` feature for the crate in `Config.toml`: `reqwest = { version =\n\"0.11.20\", features = [\"blocking\"] }`.\n\n2. Error: `failed to resolve: use of undeclared crate or module reqwest`.\nSolution: Add the `reqwest` crate.\n\n3. Error: `failed to resolve: use of undeclared crate or module roxmltree`.\nSolution: Add the `roxmltree` crate.\n\n\n```shell\n\nvim Config.toml\n\n\nreqwest = { version = \"0.11.20\", features = [\"blocking\"] }\n\n```\n\n\n```shell\n\ncargo add reqwest\n\ncargo add roxmltree\n\n```\n\n\n**Tip:** Copy the error message string, with a leading `Rust \u003Cerror\nmessage>` into your preferred browser to check whether a missing crate is\navailable. Usually this search leads to a result on crates.io and you can\nadd the missing dependencies.\n\n\nWhen the build is successful, run the code with `cargo run` and inspect the\nHacker News RSS feed output.\n\n\n![VS Code terminal, cargo run to fetch Hacker News XML\nfeed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news.png){:\n.shadow}\n\n\nWhat is next with parsing the XML body into human-readable format? 
In the\nnext section, we will learn about existing solutions and how Rust crates\ncome into play.\n\n\n## Crates\n\nRSS feeds share a common set of protocols and specifications. It feels like\nreinventing the wheel to parse XML items and understand the lower object\nstructure. Recommendation for these types of tasks: Look whether someone\nelse had the same problem already and might have created code to solve the\nproblem.\n\n\nReusable library code in Rust is organized in so-called\n[`Crates`](https://doc.rust-lang.org/rust-by-example/crates.html), and made\navailable in packages, and the package registry on crates.io. You can add\nthese dependencies to your project by editing the `Config.toml` in the\n`[dependencies]` section, or using `cargo add \u003Cname>`.\n\n\nFor the reader app, we want to use the [feed-rs\ncrate](https://crates.io/crates/feed-rs). Open a new terminal, and run the\nfollowing command:\n\n\n```shell\n\ncargo add feed-rs\n\n```\n\n\n![VS Code Terminal Terminal: Add crate, verify in\nConfig.toml](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_rust_crate_add_feed-rs_explained.png)\n\n\n### feed-rs: parse XML feed\n\nNavigate into `src/feed_reader.rs` and modify the part where we parse the\nXML body. Code Suggestions understands how to call the `feed-rs` crate\n`parser::parse` function -- there is only one specialty here: `feed-rs`\n[expects string input as raw\nbytes](https://docs.rs/feed-rs/latest/feed_rs/parser/fn.parse_with_uri.html)\nto determine the encoding itself. 
We can provide instructions in the comment\nto get the expected result though.\n\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n```\n\n\n![Code Suggestions: Public module with `get_feeds()` function, step 5:\nModify XML parser to\nfeed-rs](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_05_use_feed_rs_to_parse.png){:\n.shadow}\n\n\nThe benefit of using `feed-rs` is not immediately visible until you see the\nprinted output with `cargo run`: All keys and values are mapped to their\nrespective Rust object types, and can be used for further operations.\n\n\n![VS Code terminal, cargo run to fetch Hacker News XML\nfeed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news_feed_rs.png){:\n.shadow}\n\n\n## Runtime configuration: Program arguments\n\nUntil now, we have run the program with hard-coded RSS feed values compiled\ninto the binary. The next step is allowing to configure the RSS feeds at\nruntime.\n\n\nRust provides [program\narguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg.html) in\nthe standard misc library. [Parsing the\narguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg/matching.html)\nprovides a better and faster learning experience than aiming for advanced\nprogram argument parsers (for example, the\n[clap](https://docs.rs/clap/latest/clap/) crate), or moving the program\nparameters into a configuration file and format\n([TOML](https://toml.io/en/), YAML). 
You are reading these lines after I\ntried and failed with different routes for the best learning experience.\nThis should not stop you from taking the challenge to configure RSS feeds in\nalternative ways.\n\n\nAs a boring solution, the command parameters can be passed as `\"name,url\"`\nstring value pairs, and then are split by the `,` character to extract the\nname and URL values. The comment instructs Code Suggestions to perform these\noperations and extend the `rss_feeds` HashMap with the new values. Note that\nthe variable might not be mutable, and, therefore, needs to be modified to\n`let mut rss_feeds`.\n\n\nNavigate into `src/main.rs` and add the following code to the `main()`\nfunction after the `rss_feeds` variable. Start with a comment to define the\nprogram arguments, and check the suggested code snippets.\n\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n```\n\n\n![Code suggestions for program arguments, and splitting name,URL values for\nthe rss_feeds\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_program_args_boring_solution.png){:\n.shadow}\n\n\nThe full code example can look like the following:\n\n\n```rust\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let mut rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n        
rss_feeds.insert(name.to_string(), url.to_string());\n    }\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n\n```\n\n\nYou can pass program arguments directly to the `cargo run` command,\npreceding the arguments with `--`. Enclose all arguments with double quotes,\nput the name followed by a comma and the RSS feed URL as argument. Separate\nall arguments with whitespaces.\n\n\n```\n\ncargo build\n\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\n![VS Code terminal, RSS feed output example for the GitLab\nblog](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_gitlab_blog_rss_feed_example.png){:\n.shadow}\n\n\n### User input error handling\n\nIf the provided user input does not match the program expectation, we need\nto [throw an error](https://doc.rust-lang.org/rust-by-example/error.html)\nand help the caller to fix the program arguments. For example, passing a\nmalformed URL format should be treated as a runtime error. Instruct Code\nSuggestions with a code comment to throw an error if the URL is not valid.\n\n\n```rust\n    // Ensure that URL contains a valid format, otherwise throw an error\n```\n\n\nOne possible solution is to check if the `url` variable starts with\n`http://` or `https://`. If not, throw an error using the [panic!\nmacro](https://doc.rust-lang.org/rust-by-example/std/panic.html). 
The full\ncode example looks like the following:\n\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n\n        // Ensure that URL contains a valid format, otherwise throw an error\n        if !url.starts_with(\"http://\") && !url.starts_with(\"https://\") {\n            panic!(\"Invalid URL format: {}\", url);\n        }\n\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n```\n\n\nTest the error handling with removing a `:` in one of the URL strings. Add\nthe `RUST_BACKTRACE=full` environment variable to get more verbose output\nwhen the `panic()` call happens.\n\n\n```\n\nRUST_BACKTRACE=full cargo run -- \"GitLab\nBlog,https://about.gitlab.com/atom.xml\" \"CNCF,https//www.cncf.io/feed/\"\n\n```\n\n\n![VS Code Terminal with wrong URL format, panic error\nbacktrace](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_url_format_error_panic_backtrace.png){:\n.shadow}\n\n\n## Persistence and data storage\n\nThe boring solution for storing the feed data is to dump the parsed body\ninto a new file. 
Instruct Code Suggestions to use a pattern that includes\nthe RSS feed name, and the current ISO date.\n\n\n```rust\n    // Parse XML body with feed_rs parser, input in bytes\n    let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n\n    // Print the result\n    println!(\"{:#?}\", parsed_body);\n\n    // Dump the parsed body to a file, as name-current-iso-date.xml\n    let now = chrono::offset::Local::now();\n    let filename = format!(\"{}-{}.xml\", name, now.format(\"%Y-%m-%d\"));\n    let mut file = std::fs::File::create(filename).unwrap();\n    file.write_all(body.as_bytes()).unwrap();\n```\n\n\nA possible suggestion will include using the [chrono\ncrate](https://crates.io/crates/chrono). Add it using `cargo add chrono` and\nthen invoke `cargo build` and `cargo run` again.\n\n\nThe files are written into the same directory where `cargo run` was\nexecuted. If you are executing the binary direcly in the `target/debug/`\ndirectory, all files will be dumped there.\n\n\n![VS Code with CNCF RSS feed content file, saved on\ndisk](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_cncf_rss_feed_saved_on_disk.png)\n\n\n## Optimization\n\nThe entries in the `rss_feeds` variable are executed sequentially. Imagine\nhaving a list of 100+ URLs configured - this could take a long time to fetch\nand process. What if we could execute multiple fetch requests in parallel?\n\n\n### Asynchronous execution\n\nRust provides [threads](https://doc.rust-lang.org/book/ch16-01-threads.html)\nfor asynchronous execution.\n\n\nThe simplest solution will be spawning a thread for each RSS feed URL. We\nwill discuss optimization strategies later. 
Before you continue with\nparallel execution, measure the sequential code execution time by preceding\nthe `time` command with `cargo run`.\n\n\n```\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.21s user 0.08s system 10% cpu 2.898 total\n\n```\n\n\nNote that this exercise could require more manual code work. It is\nrecommended to persist the sequential working state in a new Git commit and\nbranch `sequential-exec`, to better compare the impact of parallel\nexecution.\n\n\n```shell\n\ngit commit -avm \"Sequential execution working\"\n\ngit checkout -b sequential-exec\n\ngit push -u origin sequential-exec\n\n\ngit checkout main\n\n```\n\n\n### Spawning threads\n\nOpen `src/feed_reader.rs` and refactor the `get_feeds()` function. Start\nwith a Git commit for the current state, and then delete the contents of the\nfunction scope. Add the following code comments with instructions for Code\nSuggestions:\n\n\n1. `// Store threads in vector`: Store thread handles in a vector, so we can\nwait for them to finish at the end of the function call.\n\n2. `// Loop over rss_feeds and spawn threads`: Create boilerplate code for\niterating over all RSS feeds, and spawn a new thread.\n\n\nAdd the following `use` statements to work with the `thread` and `time`\nmodules.\n\n\n```rust\n    use std::thread;\n    use std::time::Duration;\n```\n\n\nContinue writing the code, and close the for loop. 
Code Suggestions will\nautomatically propose adding the thread handle in the `threads` vector\nvariable, and offer to join the threads at the end of the function.\n\n\n```rust\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n        // Store threads in vector\n        let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n        // Loop over rss_feeds and spawn threads\n        for (name, url) in rss_feeds {\n            let thread_name = name.clone();\n            let thread_url = url.clone();\n            let thread = thread::spawn(move || {\n\n            });\n            threads.push(thread);\n        }\n\n        // Join threads\n        for thread in threads {\n            thread.join().unwrap();\n        }\n    }\n```\n\n\nAdd the `thread` crate, build and run the code again.\n\n\n```shell\n\ncargo add thread\n\n\ncargo build\n\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\nAt this stage, no data is processed or printed. Before we continue re-adding\nthe functionality, let us learn about the newly introduced keywords here.\n\n\n### Function scopes, threads, and closures\n\nThe suggested code brings new keywords and design patterns to learn. The\nthread handle is of the type `thread::JoinHandle`, indicating that we can\nuse it to wait for the threads to finish\n([join()](https://doc.rust-lang.org/book/ch16-01-threads.html#waiting-for-all-threads-to-finish-using-join-handles)).\n\n\n`thread::spawn()` spawns a new thread, where we can pass a function object.\nIn this case, a\n[closure](https://doc.rust-lang.org/book/ch13-01-closures.html) expression\nis passed as anonymous function. Closure inputs are passed using the `||`\nsyntax. You will recognize the [`move`\nClosure](https://doc.rust-lang.org/book/ch16-01-threads.html#using-move-closures-with-threads),\nwhich moves the function scoped variables into the thread scope. 
This avoids\nmanually specifying which variables need to be passed into the new\nfunction/closure scope.\n\n\nThere is a limitation though: `rss_feeds` is a reference `&`, passed as\nparameter by the `get_feeds()` function caller. The variable is only valid\nin the function scope. Use the following code snippet to provoke this error:\n\n\n```rust\n\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (key, value) in rss_feeds {\n        let thread = thread::spawn(move || {\n            println!(\"{}\", key);\n        });\n    }\n}\n\n```\n\n\n![VS Code Terminal, variable scope error with references and thread move\nclosure](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_build_error_function_threads_variable_scopes.png){:\n.shadow}\n\n\nAlthough the `key` variable was created in the function scope, it references\nthe `rss_feeds` variable, and therefore, it cannot be moved into the thread\nscope. 
Any values accessed from the function parameter `rss_feeds` hash map\nwill require a local copy with `clone()`.\n\n\n![VS Code Terminal, thread spawn with\nclone](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_thread_spawn_clone.png){:\n.shadow}\n\n\n```rust\n\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (name, url) in rss_feeds {\n        let thread_name = name.clone();\n        let thread_url = url.clone();\n        let thread = thread::spawn(move || {\n            // Use thread_name and thread_url as values, see next chapter for instructions.\n```\n\n\n## Parse feed XML into object types\n\nThe next step is to repeat the RSS feed parsing steps in the thread closure.\nAdd the following code comments with instructions for Code Suggestions:\n\n\n1. `// Parse XML body with feed_rs parser, input in bytes` to tell Code\nSuggestions that we want to fetch the RSS feed URL content, and parse it\nwith the `feed_rs` crate functions.\n\n2. 
`// Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and\nprint its name`: Extract the feed type by comparing the `feed_type`\nattribute with the\n[`feed_rs::model::FeedType`](https://docs.rs/feed-rs/latest/feed_rs/model/enum.FeedType.html).\nThis needs more direct instructions for Code Suggestions telling it about\nthe exact Enum values to match against.\n\n\n![Instruct Code Suggestions to match against specific feed\ntypes](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_feed_rs_type_condition.png){:\n.shadow}\n\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let body = reqwest::blocking::get(thread_url).unwrap().bytes().unwrap();\n            let feed = feed_rs::parser::parse(body.as_ref()).unwrap();\n\n            // Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name\n            if feed.feed_type == feed_rs::model::FeedType::RSS2 {\n                println!(\"{} is an RSS2 feed\", thread_name);\n            } else if feed.feed_type == feed_rs::model::FeedType::Atom {\n                println!(\"{} is an Atom feed\", thread_name);\n            }\n```\n\n\nBuild and run the program again, and verify its output.\n\n\n```\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\nCNCF is an RSS2 feed\n\nTechCrunch is an RSS2 feed\n\nGitLab Blog is an Atom feed\n\nHacker News is an RSS2 feed\n\n```\n\n\nLet us verify this output by opening the feed URLs in the browser, or\ninspecting the previously downloaded files.\n\n\nHacker News supports RSS version 2.0, with\n`channel(title,link,description,item(title,link,pubDate,comments))`.\nTechCrunch and the CNCF blog follow a similar structure.\n\n```xml\n\n\u003Crss version=\"2.0\">\u003Cchannel>\u003Ctitle>Hacker\nNews\u003C/title>\u003Clink>https://news.ycombinator.com/\u003C/link>\u003Cdescription>Links 
for\nthe intellectually curious, ranked by\nreaders.\u003C/description>\u003Citem>\u003Ctitle>Writing a debugger from scratch:\nBreakpoints\u003C/title>\u003Clink>https://www.timdbg.com/posts/writing-a-debugger-from-scratch-part-5/\u003C/link>\u003CpubDate>Wed,\n27 Sep 2023 06:31:25\n+0000\u003C/pubDate>\u003Ccomments>https://news.ycombinator.com/item?id=37670938\u003C/comments>\u003Cdescription>\u003C![CDATA[\u003Ca\nhref=\"https://news.ycombinator.com/item?id=37670938\">Comments\u003C/a>]]>\u003C/description>\u003C/item>\u003Citem>\n\n```\n\n\nThe GitLab blog uses the\n[Atom](https://datatracker.ietf.org/doc/html/rfc4287) feed format similar to\nRSS, but still requires different parsing logic.\n\n```xml\n\n\u003C?xml version='1.0' encoding='utf-8' ?>\n\n\u003Cfeed xmlns='http://www.w3.org/2005/Atom'>\n\n\u003C!-- / Get release posts -->\n\n\u003C!-- / Get blog posts -->\n\n\u003Ctitle>GitLab\u003C/title>\n\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\n\u003Clink href='https://about.gitlab.com/blog/' />\n\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\n\u003Cauthor>\n\n\u003Cname>The GitLab Team\u003C/name>\n\n\u003C/author>\n\n\u003Centry>\n\n\u003Ctitle>Atlassian Server ending: Goodbye disjointed toolchain, hello\nDevSecOps platform\u003C/title>\n\n\u003Clink\nhref='https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/'\nrel='alternate' />\n\n\u003Cid>https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/\u003C/id>\n\n\u003Cpublished>2023-09-26T00:00:00+00:00\u003C/published>\n\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\n\u003Cauthor>\n\n\u003Cname>Dave Steer, Justin Farris\u003C/name>\n\n\u003C/author>\n\n```\n\n\n### Map generic feed data types\n\nUsing\n[`roxmltree::Document::parse`](https://docs.rs/roxmltree/latest/roxmltree/struct.Document.html)\nwould require us to understand the XML node tree and its specific tag 
names.\nFortunately,\n[feed_rs::model::Feed](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html)\nprovides a combined model for RSS and Atom feeds, therefore let us continue\nusing the `feed_rs` crate.\n\n\n1. Atom: Feed->Feed, Entry->Entry\n\n2. RSS: Channel->Feed, Item->Entry\n\n\nIn addition to the mapping above, we need to extract the required\nattributes, and map their data types. It is helpful to open the\n[feed_rs::model\ndocumentation](https://docs.rs/feed-rs/latest/feed_rs/model/index.html) to\nunderstand the structs and their fields and implementations. Otherwise, some\nsuggestions would result in type conversion errors and compilation failures,\nthat are specific to the `feed_rs` implementation.\n\n\nA [`Feed`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html)\nstruct provides the `title`, type `Option\u003CText>` (either a value is set, or\nnothing). An\n[`Entry`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html)\nstruct provides:\n\n\n1. `title`: `Option\u003CText>`with\n[`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and\nthe `content` field as `String`.\n\n2. `updated`: `Option\u003CDateTime\u003CUtc>>` with\n[`DateTime`](https://docs.rs/chrono/latest/chrono/struct.DateTime.html) with\nthe [`format()`\nmethod](https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.format).\n\n3. `summary`: `Option\u003CText>`\n[`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and\nthe `content` field as `String`.\n\n4. `links`: `Vec\u003CLink>`, vector with\n[`Link`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Link.html)\nitems. 
The `href` attribute provides the raw URL string.\n\n\nUse this knowledge to extract the required data from the feed entries.\nReminder that all `Option` types need to call `unwrap()`, which requires\nmore raw instructions for Code Suggestions.\n\n\n```rust\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html\n                // Loop over all entries, and print\n                // title.unwrap().content\n                // published.unwrap().format\n                // summary.unwrap().content\n                // links href as joined string\n                for entry in feed.entries {\n                    println!(\"Title: {}\", entry.title.unwrap().content);\n                    println!(\"Published: {}\", entry.published.unwrap().format(\"%Y-%m-%d %H:%M:%S\"));\n                    println!(\"Summary: {}\", entry.summary.unwrap().content);\n                    println!(\"Links: {:?}\", entry.links.iter().map(|link| link.href.clone()).collect::\u003CVec\u003CString>>().join(\", \"));\n                    println!();\n                }\n```\n\n\n![Code suggestions to print feed entry types, with specific\nrequirements](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_print_feed_entries_fields_with_rust_type_specifics.png){:\n.shadow}\n\n\n### Error handling with Option unwrap()\n\nContinue iterating on the multi-line instructions after building and running\nthe program again. Spoiler: `unwrap()` will call the `panic!` macro and\ncrash the program when it encounters empty values. 
This can happen if a\nfield like `summary` is not set in the feed data.\n\n\n```shell\n\nGitLab Blog is an Atom feed\n\nTitle: How the Colmena project uses GitLab to support citizen journalists\n\nPublished: 2023-09-27 00:00:00\n\nthread '\u003Cunnamed>' panicked at 'called `Option::unwrap()` on a `None`\nvalue', src/feed_reader.rs:40:59\n\n```\n\n\nA potential solution is to use\n[`std::Option::unwrap_or_else`](https://doc.rust-lang.org/std/option/enum.Option.html#method.unwrap_or_else)\nand set an empty string as default value. The syntax requires a closure that\nreturns an empty `Text` struct instantiation.\n\n\nSolving the problem required many attempts to find the correct\ninitialization, passing just an empty string did not work with the custom\ntypes. I will show you all my endeavors, including the research paths.\n\n\n```rust\n\n// Problem: The `summary` attribute is not always initialized. unwrap() will\npanic! then.\n\n// Requires use mime; and use feed_rs::model::Text;\n\n/*\n\n// 1st attempt: Use unwrap() to extract Text from Option\u003CText> type.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap().content);\n\n// 2nd attempt. Learned about unwrap_or_else, passing an empty string.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| \"\").content);\n\n// 3rd attempt. summary is of the Text type, pass a new struct\ninstantiation.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{}).content);\n\n// 4th attempt. Struct instantiation requires 3 field values.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{\"\", \"\",\n\"\"}).content);\n\n// 5th attempt. Struct instantiation with public fields requires key: value\nsyntax\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\n\"\", src: \"\", content: \"\"}).content);\n\n// 6th attempt. 
Reviewed expected Text types in\nhttps://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html and created\nMime and String objects\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: String::new(), content: String::new()}).content);\n\n// 7th attempt: String and Option\u003CString> cannot be casted automagically.\nCompiler suggested using `Option::Some()`.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: Option::Some(), content: String::new()}).content);\n\n*/\n\n\n// xth attempt: Solution. Option::Some() requires a new String object.\n\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type:\nmime::TEXT_PLAIN, src: Option::Some(String::new()), content:\nString::new()}).content);\n\n```\n\n\nThis approach did not feel satisfying, since the code line is complicated to\nread, and required manual work without help from Code Suggestions. Taking a\nstep back, I reviewed what brought me there - if `Option` is `none`,\n`unwrap()` will throw an error. Maybe there is an easier way to handle this?\nI asked Code Suggestions in a new comment:\n\n\n```\n                // xth attempt: Solution. 
Option::Some() requires a new String object.\n                println!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n\n                // Alternatively, use Option.is_none()\n```\n\n\n![Code suggestions asked for alternative with\nOptions.is_none](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_after_complex_unwrap_or_else_ask_for_alternative_option.png){:\n.shadow}\n\n\nIncreased readability, less CPU cycles wasted on `unwrap()`, and a great\nlearning curve from solving a complex problem to using a boring solution.\nWin-win.\n\n\nBefore we forget: Re-add storing the XML data on disk to complete the reader\napp again.\n\n\n```rust\n                // Dump the parsed body to a file, as name-current-iso-date.xml\n                let file_name = format!(\"{}-{}.xml\", thread_name, chrono::Local::now().format(\"%Y-%m-%d-%H-%M-%S\"));\n                let mut file = std::fs::File::create(file_name).unwrap();\n                file.write_all(body.as_ref()).unwrap();\n```\n\n\nBuild and run the program to verify the output.\n\n\n```shell\n\ncargo build\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n```\n\n\n![VS Code Terminal, cargo run with formatted feed entries\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n\n## Benchmarks\n\n\n### Sequential vs. Parallel execution benchmark\n\nCompare the execution time benchmarks by creating five samples each.\n\n\n1. Sequential execution. [Example source code\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/1)\n\n2. Parallel execution. 
[Example source code\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/3)\n\n\n```shell\n\n# Sequential\n\ngit checkout sequential-exec\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.21s user 0.08s system 10% cpu 2.898 total\n\n0.21s user 0.08s system 11% cpu 2.585 total\n\n0.21s user 0.09s system 10% cpu 2.946 total\n\n0.19s user 0.08s system 10% cpu 2.714 total\n\n0.20s user 0.10s system 10% cpu 2.808 total\n\n```\n\n\n```shell\n\n# Parallel\n\ngit checkout parallel-exec\n\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\"\n\"CNCF,https://www.cncf.io/feed/\"\n\n\n0.19s user 0.08s system 17% cpu 1.515 total\n\n0.18s user 0.08s system 16% cpu 1.561 total\n\n0.18s user 0.07s system 17% cpu 1.414 total\n\n0.19s user 0.08s system 18% cpu 1.447 total\n\n0.17s user 0.08s system 16% cpu 1.453 total\n\n```\n\n\nThe CPU usage increased for parallel execution of four RSS feed threads, but\nit nearly halved the total time compared to sequential execution. With that\nin mind, we can continue learning Rust and optimize the code and\nfunctionality.\n\n\nNote that we are running the debug build through Cargo, and not the\noptimized released builds yet. There are caveats with parallel execution\nthough: Some HTTP endpoints put rate limits in place, where parallelism\ncould hit these thresholds easier.\n\n\nThe system executing multiple threads in parallel might get overloaded too –\nthreads require context switching in the Kernel, assigning resources to each\nthread. While one thread gets computing resources, other threads are put to\nsleep. If there are too many threads spawned, this might slow down the\nsystem, rather than speeding up the operations. 
Solutions include design\npatterns such as [work\nqueues](https://docs.rs/work-queue/latest/work_queue/), where the caller\nadds a task into a queue, and a defined number of worker threads pick up the\ntasks for asynchronous execution.\n\n\nRust also provides data synchronisation between threads, so-called\n[channels](https://doc.rust-lang.org/rust-by-example/std_misc/channels.html).\nTo ensure concurrent data access,\n[mutexes](https://doc.rust-lang.org/std/sync/struct.Mutex.html) are\navailable to provide safe locks.\n\n\n### CI/CD with Rust caching\n\nAdd the following CI/CD configuration into the `.gitlab-ci.yml` file. The\n`run-latest` job calls `cargo run` with RSS feed URL examples, and measures\nthe execution time continuously.\n\n\n```\n\nstages:\n  - build\n  - test\n  - run\n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:\n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory for being cached.\n\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n\nrun-latest:\n  stage: run\n  script:\n    - time cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n\n![GitLab CI/CD pipelines for Rust, cargo run\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/gitlab_cicd_pipeline_rust_cargo_run_output.png){:\n.shadow}\n\n\n## What is next\n\nThis blog post was challenging to create, with both learning advanced Rust\nprogramming techniques myself, and finding a good learning curve with Code\nSuggestions. 
The latter greatly helps with quickly generating code, not just\nboilerplate snippets – it understands the local context, and better\nunderstands the purpose and scope of the algorithm, the more code you write.\nAfter reading this blog post, you know of a few challenges and turnarounds.\nThe example solution code for the reader app is available in [the\nlearn-rust-ai-app-reader\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader).\n\n\nParsing RSS feeds is challenging since it involves data structures, with\nexternal HTTP requests and parallel optimizations. As an experienced Rust\nuser, you might have wondered: `Why not use the std::rss crate?` -- It is\noptimized for advanced asynchronous execution, and does not allow to show\nand explain the different Rust functionalities, explained in this blog post.\nAs an async exercise, try to rewrite the code using the [`rss`\ncrate](https://docs.rs/rss/latest/rss/).\n\n\n### Async learning exercises\n\nThe lessons learned in this blog post also lay the foundation for future\nexploration with persistent storage and presenting the data. Here are a few\nideas where you can continue learning Rust and optimize the reader app:\n\n\n1. Data storage: Use a database like sqlite, and RSS feed update tracking.\n\n2. Notifications: Spawn child processes to trigger notifications into\nTelegram, etc.\n\n3. Functionality: Extend the reader types to REST APIs\n\n4. Configuration: Add support for configuration files for RSS feeds, APIs,\netc.\n\n5. Efficiency: Add support for filters, and subscribed tags.\n\n6. Deployments: Use a webserver, collect Prometheus metrics, and deploy to\nKubernetes.\n\n\nIn a future blog post, we will discuss some of these ideas, and how to\nimplement them. 
Dive into existing RSS feed implementations, and learn how\nyou can refactor the existing code into leveraging more Rust libraries\n(`crates`).\n\n\n### Share your feedback\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please [share your\nthoughts in the feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[1159,2744,1035,9,766],"careers",{"slug":2746,"featured":6,"template":699},"learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","Learn Advanced Rust Programming With A Little Help From Ai Code Suggestions","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"_path":2752,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2753,"content":2759,"config":2764,"_id":2766,"_type":13,"title":2767,"_source":15,"_file":2768,"_stem":2769,"_extension":18},"/en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"title":2754,"description":2755,"ogTitle":2754,"ogDescription":2755,"noIndex":6,"ogImage":2756,"ogUrl":2757,"ogSiteName":685,"ogType":686,"canonicalUrls":2757,"schema":2758},"Learning Python with a little help from AI","Use this guided tutorial, along with GitLab Duo Code Suggestions, to learn a new programming language.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663918/Blog/Hero%20Images/aipower.jpg","https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Python with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-11-09\",\n      
}",{"title":2754,"description":2755,"authors":2760,"heroImage":2756,"date":2761,"body":2762,"category":764,"tags":2763},[2491],"2023-11-09","Learning a new programming language can help broaden your software\ndevelopment expertise, open career opportunities, or create fun challenges.\nHowever, it can be difficult to decide on one specific approach to learning\na new language. Artificial intelligence (AI) can help. In this tutorial,\nyou'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a\nguided experience in learning the Python programming language with a\npractical hands-on example.\n\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language:\nPython](#learning-a-new-programming-language-python)\n    - [Development environment for Python](#development-environment-for-python)\n    - [Hello, World](#hello-world)\n- [Start learning Python with a practical\nexample](#start-learning-python-with-a-practical-example)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n- [File I/O: Read and print a log file](#file-io-read-and-print-a-log-file)\n\n- [Flow control](#flow-control)\n    - [Loops and lists to collect files](#loops-and-lists-to-collect-files)\n    - [Conditionally collect files](#conditionally-collect-files)\n- [Functions](#functions)\n    - [Start with a simple log format](#start-with-a-simple-log-format)\n    - [String and data structure operations](#string-and-data-structure-operations)\n    - [Parse log files using regular expressions](#parse-log-files-using-regular-expressions)\n    - [Advanced log format: auth.log](#advanced-log-format-authlog)\n    - [Parsing more types: Structured logging](#parsing-more-types-structured-logging)\n- [Printing results and formatting](#printing-results-and-formatting)\n\n- [Dependency management and 
continuous\nverification](#dependency-management-and-continuous-verification)\n    - [Pip and pyenv: Bringing structure into Python](#pip-and-pyenv-bringing-structure-into-python)\n    - [Automation: Configure CI/CD pipeline for Python](#automation-configure-cicd-pipeline-for-python)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\n\nChoose your [preferred and supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors),\nand follow the documentation to enable Code Suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nProgramming languages can require installing the language interpreter\ncommand-line tools or compilers that generate binaries from source code to\nbuild and run the application.\n\n\n**Tip:** You can also use [GitLab Remote Development\nworkspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to\ncreate your own cloud development environments, instead of local development\nenvironments. This blog post focuses on using VS Code and the GitLab Web\nIDE. \n\n\n### VS Code\n\n\n[Install VS Code](https://code.visualstudio.com/download) on your client,\nand open it. Navigate to the `Extensions` menu and search for `gitlab\nworkflow`. Install the [GitLab Workflow extension for VS\nCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\nVS Code will also detect the programming languages, and offer to install\nadditional plugins for syntax highlighting and development experience. 
For\nexample, install the [Python\nextension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).\n\n\n### Code Suggestions\n\n\nFamiliarize yourself with suggestions before actually verifying the\nsuggestions. GitLab Duo Code Suggestions are provided as you type, so you do\nnot need to use specific keyboard shortcuts. To accept a code suggestion, press\nthe `tab` key. Also note that writing new code works more reliably than\nrefactoring existing code. AI is non-deterministic, which means that the\nsame suggestion may not be repeated after deleting the code suggestion.\nWhile Code Suggestions is in Beta, we are working on improving the accuracy\nof generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience.\n\n\n**Tip:** The latest release of Code Suggestions supports multiline\ninstructions. You can refine the specifications to your needs to get better\nsuggestions. We will practice this method throughout the blog post.\n\n\n## Learning a new programming language: Python  \n\n\nNow, let's dig into learning Python, which is one of the [supported\nlanguages in Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n\nBefore diving into the source code, make sure to set up your development\nenvironment.\n\n\n### Development environment for Python \n\n\n1) Create a new project `learn-python-ai` in GitLab, and clone the project\ninto your development environment. All code snippets are available in this\n[\"Learn Python with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n\n```shell\n\ngit clone https://gitlab.com/NAMESPACE/learn-python-ai.git\n\n\ncd learn-python-ai\n\n\ngit status\n\n```\n\n\n2) Install Python and the build toolchain. 
Example on macOS using Homebrew:\n\n\n```\n\nbrew install python\n\n```\n\n\n3) Consider adding a `.gitignore` file for Python, for example this\n[.gitignore template for\nPython](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/gitignore/Python.gitignore?ref_type=heads). \n\n\nYou are all set to learn Python! \n\n\n### Hello, World\n\n\nStart your learning journey in the [official\ndocumentation](https://www.python.org/about/gettingstarted/), and review the\nlinked resources, for example, the [Python\ntutorial](https://docs.python.org/3/tutorial/index.html). The\n[library](https://docs.python.org/3/library/index.html) and [language\nreference](https://docs.python.org/3/reference/index.html) documentation can\nbe helpful, too. \n\n\n**Tip:** When I touched base with Python in 2005, I did not have many use\ncases except as a framework to test Windows 2000 drivers. Later, in 2016, I\nrefreshed my knowledge with the book \"Head First Python, 2nd Edition,\"\nproviding great practical examples for the best learning experience – two\nweeks later, I could explain the differences between Python 2 and 3. You do\nnot need to worry about Python 2 – it has been deprecated some years ago,\nand we will focus only on Python 3 in this blog post. In August 2023, \"[Head\nFirst Python, 3rd\nEdition](https://www.oreilly.com/library/view/head-first-python/9781492051282/)\"\nwas published. The book provides a great learning resource, along with the\nexercises shared in this blog post. \n\n\nCreate a new file `hello.py` in the root directory of the project and start\nwith a comment saying `# Hello world`. Review and accept the suggestion by\npressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n\n```\n\n# Hello world\n\n```\n\n\nCommit the change to the Git repository. In VS Code, use the keyboard\nshortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to\nsubmit. 
\n\n\nUse the command palette (`cmd shift p`) and search for `create terminal` to\nopen a new terminal. Run the code with the Python interpreter. On macOS, the\nbinary from Homebrew is called `python3`, other operating systems and\ndistributions might use `python` without the version.\n\n\n```shell\n\npython3 hello.py\n\n```\n\n\n![Hello World, hello GitLab Duo Code\nSuggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_python_code_sugestions_hello_world.png)\n\n\n**Tip:** Adding code comments in Python starting with the `#` character\nbefore you start writing a function or algorithm will help Code Suggestions\nwith more context to provide better suggestions. In the example above, we\ndid that with `# Hello world`, and will continue doing so in the next\nexercises.\n\n\nAdd `hello.py` to Git, commit all changes and push them to your GitLab\nproject.\n\n\n```shell\n\ngit add hello.py\n\n\ngit commit -avm \"Initialize Python\"\n\n\ngit push\n\n```\n\n\nThe source code for all exercises in this blog post is available in this\n[\"Learn Python with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n\n## Start learning Python with a practical example \n\n\nThe learning goal in the following sections involves diving into the\nlanguage datatypes, variables, flow control, and functions. We will also\nlook into file operations, string parsing, and data structure operations for\nprinting the results. The exercises will help build a command-line\napplication that reads different log formats, works with the data, and\nprovides a summary. 
This will be the foundation for future projects that\nfetch logs from REST APIs, and inspire more ideas such as rendering images,\ncreating a web server, or adding Observability metrics.\n\n\n![Parsing log files into structured objects, example result after following\nthe\nexercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\n\nAs an experienced admin, you can put the script into production and use\nreal-world log format examples. Parsing and analyzing logs in stressful\nproduction incidents can be time-consuming. A local CLI tool is sometimes\nfaster than a log management tool.\n\n\nLet's get started: Create a new file called `log_reader.py` in the directory\nroot, add it to Git, and create a Git commit.\n\n\n### Define variables and print them\n\n\nAs a first step, we need to define the log files location, and the expected\nfile suffix. Therefore, let's create two variables and print them. Actually,\nask Code Suggestions to do that for you by writing only the code comments\nand accepting the suggestions. Sometimes, you need to experiment with\nsuggestions and delete already accepted code blocks. 
Do not worry – the\nquality of the suggestions will improve over time as the model generates\nbetter suggestions with more context.\n\n\n![Define log path and file suffix\nvariables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_01.png){:\n.shadow}\n\n\n![Print the variables to\nverify](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_02.png){:\n.shadow}\n\n\n```python\n\n# Specify the path and file suffix in variables\n\npath = '/var/log/'\n\nfile_suffix = '.log'\n\n\n# Print the variables \n\n\nprint(path)\n\nprint(file_suffix)\n\n```\n\n\nNavigate into the VS Code terminal and run the Python script:\n\n\n```shell\n\npython3 log_reader.py\n\n```\n\n\n![VS Code terminal, printing the\nvariables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_variables.png)\n\n\nPython supports many different types in the [standard\nlibrary](https://docs.python.org/3/library/index.html). Most common types\nare: Numeric (int, float, complex), Boolean (True, False), and String (str).\nData structures include support for lists, tuples, and dictionaries. \n\n\n### Explore variable types \n\n\nTo practice different variable types, let's define a limit of log files to\nread as a variable with the `integer` type.\n\n\n![Log file\nvariable](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_03.png){:\n.shadow}\n\n\n```python\n\n# Define log file limit variable \n\nlog_file_limit = 1024 \n\n```\n\n\nCreate a Boolean variable that forces to read all files in the directory, no\nmatter the log file suffix. 
\n\n\n```python\n\n# Define boolean variable whether to read all files recursively\n\nread_all_files_recursively = True\n\n```\n\n\n## File I/O: Read and print a log file\n\n\nCreate a directory called `log-data` in your project tree. You can copy all\nfile examples from the [log-data directory in the example\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).  \n\n\nCreate a new file `sample.log` with the following content, or any other two\nlines that provide a different message at the end.\n\n\n```\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup\nservice.\n\n```\n\n\nInstruct Code Suggestions to read the file `log-data/sample.log` and print\nthe content. \n\n\n![Code Suggestions: Read log file and print\nit](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_log_file_and_print.png){:\n.shadow}\n\n\n```python\n\n# Read the file in log-data/sample.log and print its content\n\nwith open('log-data/sample.log', 'r') as f:\n    print(f.read())\n```\n\n\n**Tip:** You will notice the indent here. The `with open() as f:` statement\nopens a new scope where `f` is available as stream. This flow requires\nindenting (`tab`) the code block, and performing actions in this scope, calling\n`f.read()` to read the file contents, and passing the immediate value as\nparameter into the `print()` function.\n\n\nNavigate into the terminal, and run the script again with `python3\nlog_reader.py`. 
You will see the file content shown in the VS Code editor,\nalso printed into the terminal.\n\n\n![VS Code terminal: Read log file, and print\nit](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_logfile_show_file_sample.png)\n\n\n## Flow control \n\n\nReading one log file is not enough – we want to analyze all files in a given\ndirectory recursively. For the next exercise, we instruct Code Suggestions\nto create an index of all files. \n\n\nPrepare the `log-data` directory with more example files from the [log-data\ndirectory in the example\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).\nThe directory tree should look as follows:\n\n\n```shell\n\ntree log-data                                                             ─╯\n\nlog-data\n\n├── sample.log\n\n└── var\n    └── log\n        ├── auth.log\n        ├── syslog.log\n        └── syslog_structured.log\n\n3 directories, 4 files\n\n```\n\n\n### Loops and lists to collect files \n\n\nModify the `path` variable to use the value `log-data/`. \n\n\n```python\n\n# Specify the path and file suffix in variables\n\npath = 'log-data/'\n\nfile_suffix = '.log'\n\n```\n\n\nTell Code Suggestions to read all file paths in the directory into a list.\nAfter the collection loop, print the list of file paths. 
\n\n\n```python\n\n# Read all file paths in the directory into a list\n\n\n# Print the list of log file paths\n\n```\n\n\n![Code Suggestion, collect file\npaths](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_collect_files.png){:\n.shadow}\n\n\nAn example of a suggestion could look like this:\n\n\n```python\n\n# Read all file paths in the directory into a list\n\nimport os\n\n\n# Read all file paths in the directory into a list\n\nlog_files = []\n\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Print the list of log file paths\n\n\nprint(log_files)\n\n```\n\n\nLet's analyze what happens here: The `import os` statement is required to\nbring the `os` library into the current scope, and we are able to call\n`os.walk()` later. The `log_files`\n[list](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range)\nis initialized as an empty list using empty brackets. `os.walk()` expects a\npath as directory tree to start searching for files. The function returns a\ngenerator object, which will walk the directory tree recursively. The\nreturned tuples include the collected files, which we want to access in a\nloop iterator, using the `for` loop notation. When the file ends with the\ndefined suffix, its full path is appended to the list. This is an optimized\nand complex example. Do not worry about understanding everything just yet -\nwe will revisit this later. \n\n\n### Conditionally collect files \n\n\nModify the instructions and add the Boolean flag evaluation for\n`read_all_files_recursively` as an input to the glob() function. 
\n\n\n```python\n\n# Read all file paths in the top level directory into a list\n\n# If read_all_files_recursively is True, read all files recursively\n\n```\n\n\n![Code Suggestions, read files recursively\nconditionally](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_files_recursively_conditionally.png){:\n.shadow}\n\n\n```\n\n# Read all file paths in the top level directory \n\nimport os\n\n\n# Read all file paths in the top level directory into a list\n\n# If read_all_files_recursively is True, read all files recursively\n\n\nlog_files = []\n\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\n\n# Print log_files\n\nprint(log_files)\n\n```\n\n\nThe result is not optimal yet because it always executes the first loop, and\noptionally the second loop. This flow leads to duplicated results when the\nscript is executed.\n\n\n```\n\npython3 log_reader.py\n\n\n['log-data/sample.log', 'log-data/sample.log', 'log-data/var/log/auth.log']\n\n```\n\n\nExperiment with Code Suggestions instructions to get a solution for the\nproblem. There are different approaches you can take: \n\n\n1) A potential solution is to wrap the source code into an if-then-else\nblock, and move the `os.listdir()` loop into the else-block. 
\n\n\n```python\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\nelse:\n    for file in os.listdir(path):\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(path, file))  \n\n```\n\n\n2) Alternatively, do not use `append()` to always add a new list entry, but\ncheck if the item exists in the list first. \n\n\n```python\n\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        # check if the entry exists in the list already\n        if os.path.isfile(os.path.join(path, file)):\n            log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):\n                # check if the entry exists in the list already\n                if file not in log_files:\n                    log_files.append(os.path.join(root, file))\n```\n\n\n3) Or, we could eliminate duplicate entries after collecting all items.\nPython allows converting lists into\n[sets](https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset),\nwhich hold unique entries. After applying `set()`, you can again convert the\nset back into a list. Code Suggestions knows about this possibility, and\nwill help with the comment `# Ensure that only unique file paths are in the\nlist` \n\n\n![Code Suggestions, converting a list to unique\nitems](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_unique_list.png){:\n.shadow}\n\n\n```python\n\n# Ensure that only unique file paths are in the list\n\n\nlog_files = list(set(log_files))\n\n```\n\n\n4) Take a step back and evaluate whether the variable\nread_all_files_recursively makes sense. 
Maybe the default behavior should\njust be reading all files recursively?\n\n\n**Tip for testing different paths in VS Code:** Select the code blocks, and\npress [`cmd /` on\nmacOS](https://code.visualstudio.com/docs/getstarted/keybindings) to comment\nout the code. \n\n\n## Functions \n\n\nLet's create a function called `parse_log_file` that parses a log file, and\nreturns the extracted data. We will define the expected log format and\ncolumns to extract, following the [syslog format\nspecification](https://en.wikipedia.org/wiki/Syslog). There are different\nlog format types and also customized formats by developers that need to be\ntaken into account – exercise for later. \n\n\n### Start with a simple log format \n\n\nInspect a running Linux VM, or use the following example log file example\nfor additional implementation.\n\n\n```\n\nless /var/log/syslog | grep -v docker \n\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Daily dpkg database backup\nservice...\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Rotate log files...\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup\nservice.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: logrotate.service: Deactivated\nsuccessfully.\n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\n\nOct 17 00:17:01 ebpf-chaos CRON[727495]: (root) CMD (   cd / && run-parts\n--report /etc/cron.hourly)\n\n```\n\n\nWe can create an algorithm to split each log line by whitespaces, and then\njoin the results again. Let's ask Code Suggestions for help. 
\n\n\n```python\n\n# Split log line \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log\nfiles.\" by whitespaces and save in a list\n\n\nlog_line = \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log\nfiles.\"\n\nlog_line_split = log_line.split(\" \")\n\nprint(log_line_split)\n\n```\n\n\nRun the script again to verify the result.\n\n\n```shell\n\npython3 log_reader.py\n\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate',\n'log', 'files.']\n\n```\n\n\nThe first three items are part of the datetime string, followed by the host,\nservice, and remaining log message items. Let's practice string operations\nin Python as the next step. \n\n\n### String and data structure operations\n\n\nLet's ask Code Suggestions for help with learning to join strings, and\nperform list operations.\n\n\n1. Join the first three items with a whitespace again. \n\n2. Keep host and service. \n\n3. Join the remaining variable item count into a string, separated with\nwhitespaces, again. \n\n4. Store the identified column keys, and their respective values in a new\ndata structure:\n[dictionary](https://docs.python.org/3/library/stdtypes.html#mapping-types-dict). 
\n\n\n![Code suggestions for list items with string\noperations](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_list_items_string_join_extract.png){:\n.shadow}\n\n\n```shell \n\npython3 log_reader.py\n\n\n# Array\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate',\n'log', 'files.']\n\n\n# Dictionary \n\n{'datetime': 'Oct 17 00:00:04', 'host': 'ebpf-chaos', 'service':\n'systemd[1]:', 'message': ' ebpf-chaos systemd[1]: Finished Rotate log\nfiles.'}\n\n```\n\n\nA working suggestion can look like the following:\n\n\n```python\n\n# Initialize results dictionary with empty values for datetime, host,\nservice, message\n\n# Loop over log line split \n\n# Join the first three list items as date string\n\n# Item 4: host \n\n# Item 5: service\n\n# Join the remaining items into a string, separated with whitespaces \n\n# Print the results after the loop \n\n\nresults = {'datetime': '', 'host': '', 'service': '', 'message': ''}\n\n\nfor item in log_line_split:\n\n    if results['datetime'] == '':\n        results['datetime'] = ' '.join(log_line_split[0:3])\n\n    elif results['host'] == '':\n        results['host'] = log_line_split[3]\n\n    elif results['service'] == '':\n        results['service'] = log_line_split[4]\n\n    else:\n        results['message'] += ' ' + item\n\nprint(results)\n\n\n```\n\n\nThe suggested algorithm loops over all log line items, and applies the same\noperation for the first three items. `log_line_split[0:3]` extracts a slice\nof three items into a new list. Calling `join()` on a separator character\nand passing the array as an argument joins the items into a string. The\nalgorithm continues to check for not initialized values for host (Item 4)\nand service (Item 5)and concludes with the remaining list items appended\ninto the message string. 
To be honest, I would have used a slightly\ndifferent algorithm, but it is a great learning curve to see other\nalgorithms, and ways to implement them. Practice with different\ninstructions, and data structures, and continue printing the data sets. \n\n\n**Tip:** If you need to terminate a script early, you can use `sys.exit()`.\nThe remaining code will not be executed. \n\n\n```python\n\nimport sys \n\nsys.exit(1)\n\n```\n\n\nImagine doing these operations for different log formats, and message types\n– it can get complicated and error-prone very quickly. Maybe there is\nanother approach. \n\n\n### Parse log files using regular expressions\n\n\nThere are different syslog format RFCs – [RFC\n3164](https://datatracker.ietf.org/doc/html/rfc3164) is obsolete but still\nfound in the wild as default configuration (matching the pattern above),\nwhile [RFC 5424](https://datatracker.ietf.org/doc/html/rfc5424) is more\nmodern, including datetime with timezone information. Parsing this format\ncan be tricky, so let's ask Code Suggestions for advice. \n\n\nIn some cases, the suggestions include regular expressions. They might not\nmatch immediately, making the code more complex to debug, with trial and\nerrors. A good standalone resource to text and explain regular expressions\nis [regex101.com](https://regex101.com/).  \n\n\n**Tip:** You can skip diving deep into regular expressions using the\nfollowing code snippet as a quick cheat. The next step involves instructing\nCode Suggestions to use these log patterns, and help us extract all valuable\ncolumns. 
\n\n\n```python\n\n# Define the syslog log format regex in a dictionary\n\n# Add entries for RFC3164, RFC5424\n\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n```\n\n\nWe know what the function should do, and its input parameters – the file\nname, and a log pattern to match. The log lines should be split by this\nregular expression, returning a key-value dictionary for each log line. The\nfunction should return a list of dictionaries. \n\n\n```python\n\n# Create a function that parses a log file\n\n# Input parameter: file path\n\n# Match log line against regex_log_pattern\n\n# Return the results as dictionary list: log line, pattern, extracted\ncolumns\n\n```\n\n\n![Code suggestion based on a multiline comment instruction to get a function\nthat parses a log file based on regex\npatterns](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_01.png){:\n.shadow}\n\n\nRemember the indent for opening a new scope? The same applies for functions\nin Python. The `def` identifier requires a function name, and a list of\nparameters, followed by an opening colon. The next lines of code require the\nindent. VS Code will help with live-linting wrong indent, before the script\nexecution fails, or the CI/CD pipelines. \n\n\nContinue with Code Suggestions – it might already know that you want to\nparse all log files, and parse them using the newly created function. 
\n\n\n![Code suggestion to parse all log files, and print the result\nset](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_02.png){:\n.shadow}\n\n\nA full working example can look like this: \n\n\n```\n\nimport os\n\n\n# Specify the path and file suffix in variables\n\npath = 'log-data/'\n\nfile_suffix = '.log'\n\n\n# Read all file paths in the directory into a list\n\nlog_files = []\n\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Define the syslog log format regex in a dictionary\n\n# Add entries for RFC3164, RFC5424\n\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n\n# Create a function that parses a log file\n\n# Input parameter: file path\n\n# Match log line against regex_log_pattern\n\n# Return the results as dictionary list: log line, pattern name, extracted\ncolumns\n\nimport re\n\n\ndef parse_log_file(file_path):\n    # Read the log file\n    with open(file_path, 'r') as f:\n        log_lines = f.readlines()\n\n    # Create a list to store the results\n    results = []\n\n    # Iterate over the log lines\n    for log_line in log_lines:\n        # Match the log line against the regex pattern\n        for pattern_name, pattern in regex_log_pattern.items():\n            match = re.match(pattern, log_line)\n\n            # If the log line matches the pattern, add the results to the list\n            if match:\n                extracted_columns = match.groups()\n                results.append({\n                    'log_line': log_line,\n                    'pattern_name': 
pattern_name,\n                    'extracted_columns': extracted_columns,\n                    'source_file': file_path\n                })\n\n    # Return the results\n    return results\n\n# Parse all files and print results\n\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    print(results)\n```\n\n\nLet's unpack what the `parse_log_file()` function does:\n\n\n1. Opens the file from `file_path` parameter. \n\n2. Reads all lines into a new variable `log_lines`. \n\n3. Creates a results list to store all items. \n\n4. Iterates over the log lines. \n\n5. Matches against all regex patterns configured in regex_log_pattern. \n\n6. If a match is found, extracts the matching column values.\n\n7. Creates a results item, including the values for the keys `log_line`,\n`pattern_name`, `extracted_colums`, `source_file`. \n\n8. Appends the results item to the results list.\n\n9. Returns the results list. \n\n\nThere are different variations to this – especially for the returned result\ndata structure. For this specific case, log lines come as list already.\nAdding a dictionary object instead of a raw log line allows function callers\nto extract the desired information in the next step. Once a working example\nhas been implemented, you can refactor the code later, too. \n\n\n### Advanced log format: auth.log\n\n\nParsing the syslog on a Linux distribution might not unveil the necessary\ndata to analyze. On a virtual machine that exposes port 22 (SSH) to the\nworld, the authentication log is much more interesting – plenty of bots and\nmalicious actors testing default password combinations and often brute force\nattacks.\n\n\nThe following snippet from `/var/log/auth.log` on one of my private servers\nshows the authentication log format and the random attempts from bots using\ndifferent usernames, etc. 
\n\n\n```\n\nOct 15 00:00:19 ebpf-chaos sshd[3967944]: Failed password for invalid user\nubuntu from 93.254.246.194 port 48840 ssh2\n\nOct 15 00:00:20 ebpf-chaos sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2\n\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Received disconnect from\n93.254.246.194 port 48840:11: Bye Bye [preauth]\n\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Disconnected from invalid user\nubuntu 93.254.246.194 port 48840 [preauth]\n\nOct 15 00:00:24 ebpf-chaos sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Received disconnect from\n180.101.88.227 port 44397:11:  [preauth]\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Disconnected from authenticating\nuser root 180.101.88.227 port 44397 [preauth]\n\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: PAM 2 more authentication\nfailures; logname= uid=0 euid=0 tty=ssh ruser= rhost=180.101.88.227 \nuser=root\n\nOct 15 00:00:25 ebpf-chaos sshd[3967998]: Invalid user teamspeak from\n185.218.20.10 port 33436\n\n```\n\n\n**Tip for intrusion prevention:** Add a firewall setup, and use\n[fail2ban](https://en.wikipedia.org/wiki/Fail2ban) to block invalid auth\nlogins. \n\n\nThe next exercise is to extend the logic to understand the free form log\nmessage parts, for example `Failed password for invalid user ubuntu from\n93.254.246.194 port 48840 ssh2`. The task is to store the data in an\noptional dictionary with key value pairs. \n\n\nCreate a new function that takes the previously parsed log line results as\ninput, and specifically parses the last list item for each line.\n\n\n1. Count the number of `Failed password` and `Invalid user` messages.\n\n2. 
Return the results with count, log file, pattern \n\n\n![Code suggestions for a log file message parser to count auth.log\nfailures](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_parse_log_message_auth_log.png){:\n.shadow}\n\n\nA working suggestion can look like the following code:\n\n\n```python\n\n# Create a function that parses a log file message from the last\nextracted_columns entry \n\n# Input: Parsed log lines results list \n\n# Loop over all log lines in the list, and extract the last list item as\nmessage \n\n# Count failure strings in the message: Failed password, Invalid user \n\n# Return the results if failure count greater 0: log_file, count, failure\nstring\n\ndef parse_log_file_message(results):\n    failure_results = []\n\n    # Iterate over the log lines\n    for result in results:\n        # Extract the message from the last list item\n        message = result['extracted_columns'][-1]\n\n        # Count the number of failure strings in the message\n        failure_count = message.count('Failed password') + message.count('Invalid user')\n\n        # If the failure count is greater than 0, add the results to the list\n        if failure_count > 0:\n            failure_results.append({\n                'log_file': result['source_file'],\n                'count': failure_count,\n                'failure_string': message\n            })\n\n    # Return the results\n    return failure_results\n\n# Parse all files and print results\n\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    failure_results = parse_log_file_message(results)\n    print(failure_results)\n```\n\n\nThe algorithm follows the previous implementations: First, create a results\narray to store matching data. Then, iterate over the already parsed\nlog_lines in the list. Each log line contains the `extracted_columns` key,\nwhich holds the free-form message string at the end. 
The next step is to\ncall the string object function `count()` to count how many times a given\ncharacter sequence is contained in a string. The returned numbers are added\nup to the `failure_count` variable. If it is greater than zero, the result\nis added to the results list, including the `log_file`, `count` and\n`failure_string` key-value pairs. After returning the parsed log message\nresults, loop through all log files, parse them, and print the results\nagain. \n\n\nExecute the script to inspect the detected matches. Note that the data\nstructure can be optimized in future learning steps.\n\n\n```\n\npython3 log_reader.py\n\n\n[{'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string':\n'sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194\nport 48840 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1,\n'failure_string': 'sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from\n180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967998]: Invalid user teamspeak from\n185.218.20.10 port 33436'}, {'log_file': 'log-data/var/log/auth.log',\n'count': 1, 'failure_string': 'sshd[3967998]: Failed password for invalid\nuser teamspeak from 185.218.20.10 port 33436 ssh2'}, {'log_file':\n'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3968077]:\nInvalid user mcserver from 218.211.33.146 port 50950'}]\n\n\n```\n\n\n### Parsing more types: Structured logging\n\n\nApplication developers can use the structured logging format to help machine\nparsers to extract the key value pairs. 
Prometheus provides this information\nin the following structure in syslog:\n\n\n```\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.425Z\ncaller=compact.go:519 level=info component=tsdb m\n\nsg=\"write block\" mint=1697558404661 maxt=1697565600000\nulid=01HCZG4ZX51GTH8H7PVBYDF4N6 duration=148.675854ms\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.464Z\ncaller=head.go:1213 level=info component=tsdb msg\n\n=\"Head GC completed\" caller=truncateMemory duration=6.845245ms\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.467Z\ncaller=checkpoint.go:100 level=info component=tsd\n\nb msg=\"Creating checkpoint\" from_segment=2308 to_segment=2309\nmint=1697565600000\n\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.517Z\ncaller=head.go:1185 level=info component=tsdb msg\n\n=\"WAL checkpoint complete\" first=2308 last=2309 duration=50.052621ms\n\n```\n\n\nThis format is easier to parse for scripts, because the message part can be\nsplit by whitespaces, and the assignment character `=`. Strings that contain\nwhitespaces are guaranteed to be enclosed with quotes. The downside is that\nnot all programming language libraries provide ready-to-use structured\nlogging libraries, making it harder for developers to adopt this format. \n\n\nPractice following the previous example to parse the `auth.log` format with\nadditional information. 
Tell Code Suggestions that you are expecting\nstructured logging format with key-value pairs, and which returned data\nstructure would be great:\n\n\n```python\n\n# Create a function that parses a log file message from the last\nextracted_columns entry \n\n# Input: Parsed log lines results list \n\n# Loop over all log lines in the list, and extract the last list item as\nmessage \n\n# Parse structured logging key-value pairs into a dictionary\n\n# Return results: log_file, dictionary \n\n```\n\n\n![Code suggestions for parsing structured logging format in the log file\nmessage\npart](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_propose_structured_logging_message_parser.png){:\n.shadow}\n\n\n### Printing results and formatting\n\n\nMany of the examples used the `print()` statement to print the content on\nthe terminal. Python objects in the standard library support text\nrepresentation, and for some types it makes more sense (string, numbers),\nothers cannot provide much details (functions, etc.). \n\n\nYou can also pretty-print almost any data structure (lists, sets,\ndictionaries) in Python. The JSON library can format data structures in a\nreadable format, and use a given spaces indent to draw the JSON structure on\nthe terminal. Note that we use the `import` statement here to bring\nlibraries into the current scope, and access their methods, for example\n`json.dumps`. \n\n\n```python\n\nimport json \n\nprint(json.dumps(structured_results, indent=4))\n\n```\n\n\n![Parsing log files into structured objects, example result after following\nthe\nexercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\n\nPractice with modifying the existing source code, and replace the code\nsnippets where appropriate. 
Alternatively, create a new function that\nimplements pretty printing.\n\n\n```python\n\n# Create a pretty print function with indent 4 \n\n```\n\n\n![Code suggestions for pretty-print\nfunction](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_pretty_print.png){:\n.shadow}\n\n\nThis idea works in a similar fashion with creating your own logger\nfunctions...but we have to stop learning and take a break. Before we\nconclude the first blog post in the learning series, let's ensure that CI/CD\nand dependencies are set up properly for future exercises and async\npractice. \n\n\n## Dependency management and continuous verification  \n\n\n### Pip and pyenv: Bringing structure into Python \n\n\nDependencies can be managed in the [`requirements.txt`\nfile](https://pip.pypa.io/en/stable/reference/requirements-file-format/),\nincluding optional version dependencies. Using `requirements.txt` file also\nhas the advantage of being the single source of truth for local development\nenvironments and running continuous builds with GitLab CI/CD. They can use\nthe same installation command:\n\n\n```shell\n\npip install -r requirements.txt\n\n```\n\n\nSome Linux distributions do not install the pip package manager by default,\nfor example, Ubuntu/Debian require to install the `python3-pip` package. \n\n\nYou can manage different virtual environments using\n[venv](https://docs.python.org/3/library/venv.html). This workflow can be\nbeneficial to install Python dependencies into the virtual environment,\ninstead of globally into the OS path which might break on upgrades. \n\n\n```shell\n\npip install virtualenv\n\nvirtualenv venv\n\nsource venv/bin/activate \n\n```\n\n\n### Automation: Configure CI/CD pipeline for Python\n\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should continuously\nlint, test, and build the code. 
You can mimic the steps from the local\ndevelopment, and add testing more environments and versions: \n\n\n1. Lint the source code and check for formatting errors. The example uses\n[Pyflakes](https://pypi.org/project/pyflakes/), a mature linter, and\n[Ruff](https://docs.astral.sh/ruff/ ), a fast linter written in Rust. \n\n2. Cache dependencies installed using the pip package manager, following the\ndocumentation for [Python caching in GitLab\nCI/CD](https://docs.gitlab.com/ee/ci/caching/#cache-python-dependencies).\nThis saves time and resources on repeated CI/CD pipeline runs.\n\n3. Use parallel matrix builds to test different Python versions, based on\nthe available container images on Docker Hub and their tags. \n\n\n```yaml\n\nstages:\n  - lint\n  - test\n\ndefault:\n  image: python:latest\n  cache:                      # Pip's cache doesn't store the python packages\n    paths:                    # https://pip.pypa.io/en/stable/topics/caching/\n      - .cache/pip\n  before_script:\n    - python -V               # Print out python version for debugging\n    - pip install virtualenv\n    - virtualenv venv\n    - source venv/bin/activate\n\nvariables:  # Change pip's cache directory to be inside the project\ndirectory since we can only cache local items.\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.cache/pip\"\n\n# lint template\n\n.lint-tmpl:\n  script:\n    - echo \"Linting Python version $VERSION\"\n  parallel:\n    matrix:\n      - VERSION: ['3.9', '3.10', '3.11', '3.12']   # https://hub.docker.com/_/python\n\n# Lint, using Pyflakes: https://pypi.org/project/pyflakes/ \n\nlint-pyflakes:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - find . 
-not -path './venv' -type f -name '*.py' -exec sh -c 'pyflakes {}' \\;\n\n# Lint, using Ruff (Rust): https://docs.astral.sh/ruff/ \n\nlint-ruff:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - ruff .\n```\n\n\n![GitLab CI/CD Python lint job view, part of matrix\nbuilds](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_cicd_python_lint_job_log_view.png)\n\n\n## What is next \n\n\nFun fact: GitLab Duo Code Suggestions also helped writing this blog post in\nVS Code, knowing about the context. In the screenshot, I just wanted to add\na tip about [regex101](https://regex101.com/), and GitLab Duo already knew. \n\n\n![Writing the GitLab blog post in VS Code with support from GitLab Duo Code\nSuggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_duo_code_suggestions_helping_write_the_learning_python_ai_blog_post.png)\n\n\nIn an upcoming blog, we will look into advanced learning examples with more\npractical (log) filtering and parallel operations, how to fetch logs from\nAPI endpoints (CI/CD job logs for example), and more data analytics and\nobservability. Until then, here are a few recommendations for practicing\nasync.\n\n\n### Async learning exercises\n\n\n- Implement the missing `log_file_limit` variable check. \n\n- Print a summary of the results in Markdown, not only JSON format. \n\n- Extend the script to accept a search filter as environment variable.\nPrint/count only filtered results. \n\n- Extend the script to accept a date range. It might require parsing the\ndatetime column in a time object to compare the range. \n\n- Inspect a GitLab CI/CD pipeline job log, and download the raw format.\nExtend the log parser to parse this specific format, and print a summary. \n\n\n### Share your feedback\n\n\nWhich programming language are you learning or considering learning? 
Start a\nnew topic on our [community](/community/) forum or Discord and share your\nexperience.\n\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please share your\nthoughts and feedback [in the feedback\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[495,1035,9,766],{"slug":2765,"featured":6,"template":699},"learning-python-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learning-python-with-a-little-help-from-ai-code-suggestions.yml","Learning Python With A Little Help From Ai Code Suggestions","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"_path":2771,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2772,"content":2776,"config":2781,"_id":2783,"_type":13,"title":2784,"_source":15,"_file":2785,"_stem":2786,"_extension":18},"/en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"title":2773,"description":2755,"ogTitle":2773,"ogDescription":2755,"noIndex":6,"ogImage":2756,"ogUrl":2774,"ogSiteName":685,"ogType":686,"canonicalUrls":2774,"schema":2775},"Learning Rust with a little help from AI","https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Rust with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-08-10\",\n      }",{"title":2773,"description":2755,"authors":2777,"heroImage":2756,"date":2778,"body":2779,"category":764,"tags":2780},[2491],"2023-08-10","Learning a new programming language can help broaden your software\ndevelopment expertise, open career opportunities, or create fun challenges.\nHowever, it can be difficult to decide on one specific approach to learning\na new language. 
Artificial intelligence (AI) can help. In this tutorial,\nyou'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a\nguided experience in learning the Rust programming language.\n\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language:\nRust](#learning-a-new-programming-language-rust)\n    - [Development environment for Rust](#development-environment-for-rust)\n    - [Hello, World](#hello-world)\n- [Cargo: Bringing structure into Rust](#cargo-bringing-structure-into-rust)\n\n- [Automation: Configure CI/CD pipeline for\nRust](#automation-configure-cicd-pipeline-for-rust)\n\n- [Continue learning Rust](#continue-learning-rust)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n    - [Flow control: Conditions and loops](#flow-control-conditions-and-loops)\n    - [Functions](#functions)\n    - [Testing](#testing)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\nChoose your [preferred and supported\nIDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors),\nand follow the documentation to enable code suggestions for [GitLab.com\nSaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas)\nor [GitLab self-managed\ninstances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\n\nProgramming languages can require an install of the language interpreter\ncommand-line tools or compilers that generate binaries from source code to\nbuild and run the application.\n\n\nTip: You can also use [GitLab Remote Development\nworkspaces](/blog/quick-start-guide-for-gitlab-workspaces/) 
to\ncreate your own cloud development environments, instead of local development\nenvironments. This blog post focuses on using VS Code and the GitLab Web\nIDE. \n\n\n### VS Code\n\nOn macOS, you can [install VS Code](https://code.visualstudio.com/download)\nas a Homebrew cask or manually download and install it. \n\n\n```shell\n\nbrew install --cask visual-studio-code \n\n```\n\n\nNavigate to the `Extensions` menu and search for `gitlab workflow`. Install\nthe [GitLab workflow extension for VS\nCode](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). \n\n\nTip: VS Code will also detect the programming languages, and offer to\ninstall additional plugins for syntax highlighting and development\nexperience. \n\n\n### Code Suggestions\n\nIt can help to familiarize yourself with suggestions before actually\nverifying the suggestions. GitLab Code Suggestions are provided as you type,\nso you do not need to use specific keyboard shortcuts. To accept a code\nsuggestion, press the `tab` key. Also note that writing new code works more\nreliably than refactoring existing code. AI is non-deterministic, which\nmeans that the same suggestion may not be repeated after deleting the code\nsuggestion. While Code Suggestions is in Beta, we are working on improving\nthe accuracy of generated content overall. Please review the [known\nlimitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations),\nas this could affect your learning experience. \n\n\n## Learning a new programming language: Rust \n\nNow, let's dig into learning Rust, which is one of the [supported languages\nin Code\nSuggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides a\ngreat tutorial for beginners, together with the official [Rust\nbook](https://doc.rust-lang.org/book/). 
The [Hands-on Rust\nbook](https://hands-on-rust.com/) shows how to build a 2D game as a more\npractical approach. More examples are shared in [this Rust book\nlist](https://github.com/sger/RustBooks). \n\n\nBefore diving into the source code, make sure to set up your development\nenvironment.\n\n\n### Development environment for Rust\n\n1) Create a new project `learn-rust-ai` in GitLab, and clone the project\ninto your development environment. All code snippets are available in [this\n\"Learn Rust with AI\"\nproject](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai).\n\n\n```shell\n\ngit clone https://gitlab.com/NAMESPACE/learn-rust-ai.git\n\n\ncd learn-rust-ai\n\n\ngit status\n\n```\n\n\n2) Install Rust and the build toolchain. Fortunately, this is\nstraightforward [following the Rust install\ndocumentation](https://www.rust-lang.org/tools/install).\n\n\nTip for using the generic installer: Download the script and run it after\nreview. \n\n\n```\n\n# Download and print the script before running it\n\ncurl -Lvs https://sh.rustup.rs -o rustup-init.sh\n\n\n# Run the Rust installer script\n\nsh rustup-init.sh \n\n```\n\n\nExample on macOS using Homebrew:\n\n\n```\n\nbrew install rust\n\n```\n\n\n1) Optional: Install the [rust-analyzer VS Code\nextension](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer).\n\n\n2) Each exercise will invite you to compile the code with the [`rustc`\ncommand](https://doc.rust-lang.org/rustc/what-is-rustc.html), and later\nusing [`cargo` as build tool and package\nmanager](https://doc.rust-lang.org/cargo/index.html).\n\n\nYou are all set to learn Rust! \n\n\n### Hello, World\n\nWe will start with [Rust by\nExample](https://doc.rust-lang.org/rust-by-example/), and follow the [Hello,\nWorld exercise](https://doc.rust-lang.org/rust-by-example/hello.html).\n\n\nCreate a new file `hello.rs` in the root directory of the project and start\nwith a comment saying `// Hello world`. 
Next, start writing the `main`\nfunction, and verify the code suggestion.\n\n\n![VS Code hello.rs Rust code suggestion, asking to\naccept](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_suggested.png){:\n.shadow}\n\n\nAccept the suggestion by pressing the `tab` key and save the file (keyboard\nshortcut: cmd s). \n\n\n```\n\n// Hello world\n\n\nfn main() {\n    println!(\"Hello, world!\");\n}\n\n```\n\n\n![VS Code hello.rs Rust code suggestion,\naccepted](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_accepted.png){:\n.shadow}\n\n\nCommit the change to the Git repository. In VS Code, use the keyboard\nshortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to\nsubmit. \n\n\nUse the command palette (`cmd shift p`) and search for `create terminal` to\nopen a new terminal. \n\n\nBuild and run the code.\n\n\n```shell\n\nrustc hello.rs\n\n\n./hello\n\n```\n\n\n![hello.rs Rust code suggestion, accepted, compiled,\nrun](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_cli_build.png){:\n.shadow}\n\n\nTip: Adding [code comments in Rust\n(`//`)](https://doc.rust-lang.org/reference/comments.html) before you start\nwriting a function or algorithm will help Code Suggestions with more context\nto provide better suggestions. In the example above, we did that with `//\nHello world`, and will continue doing so in the next exercises. \n\n\n## Cargo: Bringing structure into Rust\n\n[Cargo](https://doc.rust-lang.org/rust-by-example/cargo.html) is the\nofficial Rust package management tool. It is more than that - you can run\nbuild and test commands because Cargo understands them as well. 
\n\n\nYou can initialize a new Cargo configuration in the current directory tree\nwith the following command:\n\n\n```shell\n\ncargo init\n\n```\n\n\nThe directory tree invites you to add the source code into the `src/`\ndirectory, while `Cargo.toml` manages the dependencies and used compiler\nversions. The `.gitignore` file is also added including best practices. \n\n\n```shell\n\ntree\n\n.\n\n├── Cargo.toml\n\n├── README.md\n\n├── hello\n\n├── hello.rs\n\n└── src\n    └── main.rs\n```\n\n\nTry building the code and running it using `cargo`.\n\n\n```shell\n\ncargo build\n\n\ncargo run\n\n```\n\n\nCommit all changes and push them to your GitLab project.\n\n\n```shell\n\ngit commit -avm \"Initialize Cargo\"\n\n\ngit push\n\n```\n\n\nAfter exploring Cargo, let's make sure that our code is continuously tested\nwhile learning Rust. The next section explains how to set up [GitLab\nCI/CD](https://about.gitlab.com/topics/ci-cd/) for Rust. \n\n\n## Automation: Configure CI/CD pipeline for Rust\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should run two jobs in\ntwo stages: Build and test the code. The default container\n[image](https://docs.gitlab.com/ee/ci/yaml/#image), `rust:latest`, works in\nthe first iteration. In order to save resources, the CI/CD configuration\nalso supports [caching](https://docs.gitlab.com/ee/ci/caching/) for\ndownloaded dependencies and build objects. The `CARGO_HOME` variable is set\nto the CI/CD job home directory to ensure everything gets appropriately\ncached.\n\n\n```yaml\n\nstages:\n  - build\n  - test \n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:                      \n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory to be cached. 
\n\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo      \n```\n\n\nThe CI/CD jobs inherit the\n[`default`](https://docs.gitlab.com/ee/ci/yaml/#default) values, and specify\nthe cargo commands in the [`script`\nsection](https://docs.gitlab.com/ee/ci/yaml/#script).\n\n\n```yaml\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: test\n  script:\n    - cargo test --verbose\n```\n\n\nYou can see an example in [this\nMR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/merge_requests/1/diffs).\n\n\n## Continue learning Rust \n\nMake sure to add new source code into the `src/` directory. \n\n\n### Define variables and print them\n\nPractice adding a few more\n[print](https://doc.rust-lang.org/rust-by-example/hello/print.html)\nstatements into `src/main.rs`, and then build and run the code again.\n\n\n1) Define a variable called `name` and assign your name as string value.\n\n\n2) Print the name, including a string prefix saying `Hello, `. \n\n\n![VS Code main.rs Rust code suggestion, first step in\nprint](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_first.png){:\n.shadow}\n\n\n![VS Code main.rs Rust code suggestion, second step in\nprint](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_second.png){:\n.shadow}\n\n\n1) Open a new terminal in VS Code using the command palette (keyboard\nshortcut `cmd + shift + p`) and search for `terminal`.\n\n\n2) Build and run the code with the `cargo build` and `cargo run` commands. 
\n\n\n![VS Code terminal with cargo build and run\noutput](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_cargo_build_run_terminal.png){:\n.shadow}\n\n\nAn example solution can be found\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/variable_print.rs). \n\n\n### Explore variable types \n\nDefine different variable value types\n([primitives](https://doc.rust-lang.org/rust-by-example/primitives.html))\nand embed them into the `print` statements. Maybe they feel familiar with\nother programming languages?\n\n\nTip: Use code comments to see which code suggestions can be useful to learn.\nStart with typing `// Integer addition` and see what code suggestions you\ncan add.\n\n\n![VS Code main.rs Rust code suggestion, primitive types with literals and\nexpressions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_primitive_types_literals_operators.png)\n\n\nExperiment with GitLab Duo Code Suggestions. The shown examples are\nnon-deterministic, but you may be able to add additions, subtractions,\nmultiplications, etc., and the corresponding `println` statements just by\naccepting code suggestions and continuing the flow with `enter` or\ncompleting the code statements. This workflow can create a chain of code\nsuggestions that can help you learn the Rust language. 
\n\n\n![Literals and expressions, first\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_01.png){:\n.shadow}\n\n![Literals and expressions, second\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_02.png){:\n.shadow}\n\n![Literals and expressions, third\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_03.png){:\n.shadow}\n\n\nAn example solution can be found\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/literals_expressions.rs). \n\n\nThe code suggestions are not perfect. Sometimes there are errors that\nrequire you to fix the problems. When writing this blog post, I had to fix\ntwo missing semicolons at the end of the code lines. The great thing about\nthe Rust compiler is that the error messages tell you exactly where the\nproblem happens with suggestions to fix them. Code Suggestions and the\nRust-provided build chain make writing Rust code more efficient. \n\n\n```rust\n\nprintln!(\"Hello, {}!\", name)\n\n\n// Integer subtraction\n\nlet y = 9 - 4\n\n```\n\n\n![Terminal build, errors, Rust compiler\nhelp](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_errors_rust_help.png){:\n.shadow}\n\n\nYou can try to provoke the same error by removing a semicolon at the end of\na statement and then running `cargo build` in the terminal again. The Rust\ncompiler will also warn you about unused variables to help with better code\nquality. The screenshot shows warnings for variable definitions, and also a\nCLI command to fix them. 
\n\n\n![Terminal build, warnings, Rust compiler\nhelp](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_warnings_rust_help.png){:\n.shadow}\n\n\n### Flow control: Conditions and loops \n\nNext, let's focus on [flow\ncontrol](https://doc.rust-lang.org/rust-by-example/flow_control.html) with\nconditions, loops, etc., and how to implement them.\n\n\n1) Start typing `// Flow control` and see which suggestions are provided.\n\n\n2) Experiment with the code, and continue defining a boolean variable `v`\nwhich is set to true. \n\n\n```rust\n  // Flow control\n  let v = true;\n\n```\n\n\n![Conditions, boolean\nvariable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_01.png){:\n.shadow}\n\n![Conditions, boolean variable, if\ncondition](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_02.png){:\n.shadow}\n\n\n1) Start typing `// Loops` and experiment with the code suggestions. \n\n\nLet's assume the loop looks like the following snippet. It does not have a\nloop counter which gets printed on every loop execution.\n\n\n```rust\n\n// Loops\n\nlet mut count = 0;\n\n\nloop {\n    count += 1;\n\n    if count == 10 {\n        break;\n    }\n}\n\n```\n\n\n2) Start typing `println!` and see which code suggestions are provided, for\nexample `println!(\"Count: {}\", count);`. \n\n\n![Loops, loop counter print\nsuggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_loops_print_counter.png)\n\n\n3) Apply the suggestions, and execute `cargo build && cargo run` on the\nterminal again. 
\n\n\nLet's learn more: Rust supports different loop types, for example [while\nloops](https://doc.rust-lang.org/rust-by-example/flow_control/while.html),\n[for\nloops](https://doc.rust-lang.org/rust-by-example/flow_control/for.html),\netc. \n\n\n1) Type `// While loop` and verify the code suggestions. Repeat the same for\n`// For loop`.\n\n\n```rust\n\n// While loops\n\nlet mut count = 0;\n\n\nwhile count \u003C 10 {\n    count += 1;\n    println!(\"Count: {}\", count);\n}\n\n\n// For loops\n\nlet a = [10, 2, 3, 4, 5];\n\n\nfor element in a {\n    println!(\"Element: {}\", element);\n}\n\n```\n\n\nThere is more to learn with loops and conditions: Iterate over arrays,\nlists, maps, slices. Practice with writing comments for `// Maps and sets`\nand `// Vectors and strings`. \n\n\n![Vectors,\nstrings](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_vectors_strings.png){:\n.shadow}\n\n\n```rust\n  // Maps and sets\n  let mut scores = HashMap::new();\n\n  scores.insert(String::from(\"Blue\"), 10);\n  scores.insert(String::from(\"Yellow\"), 50);\n\n  for (key, value ) in &scores {\n      println!(\"{}: {}\", key, value);\n  }\n\n  // Vectors and strings\n  let mut v = Vec::new();\n\n  v.push(1);\n  v.push(2);\n\n  for element in &v {\n      println!(\"Element: {}\", element);\n  }  \n```\n\n\nThis snippet will fail because the `HashMap` type needs to be imported from\n`std::collections::HashMap`. Add the following line on top before the main\nfunction definition: \n\n\n```rust\n\nuse std::collections::HashMap;\n\n``` \n\n\n2) Build and run the code with `cargo build && cargo run`. 
\n\n\nAn example solution is provided\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/flow_control.rs).\n\n\n**Async exercise**: \n\n\n1) Modify the input values for the different data types, and build and run\nthe code again.\n\n\n2) Add a condition into the loops that print the items only when a specific\ncondition is met (for example, the number is odd or even). \n\n\n### Functions \n\n\n[Functions](https://doc.rust-lang.org/rust-by-example/fn.html) help increase\ncode readability and testability with unit tests. Practice creating\nfunctions with the following steps: \n\n\n1) Two functions `isEven` and `isOdd` to evaluate whether a number is even\nor odd.\n\n\n```rust\n\nfn isEven(x: i32) -> bool {\n    x % 2 == 0\n}\n\n\nfn isOdd(x: i32) -> bool {\n    x % 2 != 0\n}\n\n```\n\n\n2) `isPrime` function to check whether a given integer value is a prime\nnumber.\n\n\n```rust\n\nfn isPrime(x: i32) -> bool {\n    let mut i = 2;\n\n    while i * i \u003C= x {\n        if x % i == 0 {\n            return false;\n        } else {\n            i += 1;\n        }\n    } \n\n    return true\n}\n\n```\n\n\n3) Create an array of integer values, loop over it, and call the functions.\nLet GitLab Code Suggestions guide you with the implementation by starting to\ntype the if conditions followed by the function name. \n\n\n```rust\n  // Functions\n  let mut integers = vec![1, 2, 3, 4, 5];\n\n  for i in integers.iter() {\n\n      if (isEven(i)) {\n          println!(\"{} is even\", i);\n      }\n\n      if (isOdd(i)) { \n          println!(\"{} is odd\", i);\n      }\n\n      if (isPrime(i)) { \n          println!(\"{} is prime\", i);\n      }\n\n      println!(\"{}\", i);\n  }\n```\n\n\nNote that passing a reference value to a function may result in an error\nfrom the Rust compiler. Follow the suggestions and build the code again. 
\n\n\n```shell\n\n$ cargo build && cargo run \n\n\nerror[E0308]: mismatched types\n   --> src/main.rs:112:21\n    |\n112 |         if (isPrime(i)) { \n    |             ------- ^ expected `i32`, found `&{integer}`\n    |             |\n    |             arguments to this function are incorrect\n    |    \nnote: function defined here\n   --> src/main.rs:136:4\n    |\n136 | fn isPrime(x: i32) -> bool {\n    |    ^^^^^^^ ------\nhelp: consider dereferencing the borrow\n    |\n112 |         if (isPrime(*i)) { \n    |                     +\n```\n\n\nAn example solution is provided\n[here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/functions.rs).\n\n\n**Async exercise**: Create a function `containsString` and test it with an\narray of string values, and a string to search for, in a loop. The\nscreenshot shows a potential implementation. \n\n\n![containsString function, and vector with string elements to test,\nsuggesting its usage in the main\nfunction](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_implemented_then_suggested_in_main.png){:\n.shadow}\n\n\n### Testing \n\nWhile learning programming, adopt\n[testing](https://doc.rust-lang.org/rust-by-example/testing.html) into your\nprocess. This can be unit tests for functions, documentation testing, and\nintegration testing. Practice with testing the previously created functions\n`isEven`, `isOdd`, and `isPrime`. 
Start by typing `mod tests {` followed by\na new line with `use super::*` to implement the example from the [Rust\ndocumentation for unit\ntests](https://doc.rust-lang.org/rust-by-example/testing/unit_testing.html).\n\n\n```rust\n\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_is_even() {\n        assert!(isEven(2)); \n        assert!(!isEven(3));\n    }\n\n    #[test] \n    fn test_is_odd() {\n        assert!(!isOdd(2));\n        assert!(isOdd(3));\n    }\n\n    #[test]\n    fn test_is_prime() { \n        assert!(isPrime(2));\n        assert!(!isPrime(3));\n    }\n}\n\n```\n\n\nRun `cargo test` to run the unit tests. Modify the test values to experiment\nwith the results. \n\n\n```shell\n\ncargo test\n\n```\n\n\n![Function unit tests, cargo test output in the VS Code\nterminal](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_unit_tests_terminal_run.png)\n\n\nCreate unit tests that fail, and commit and push the changes to GitLab. The\nCI/CD pipelines will fail in this simulated breakage. The example above\nneeds a fix for the `test_is_prime` test. Commit and push the change to\nverify that the pipeline passes again. \n\n\n```diff\n\n-        assert!(!isPrime(3));\n\n+        assert!(!isPrime(4));\n\n```\n\n\n## What is next \n\nIn an upcoming blog post, we will look into advanced learning examples with\nasynchronous operations, services and external API communication. 
Until then, here are a few recommendations for practicing async.\n\n\n### Async learning exercises\n\n- [`std misc`](https://doc.rust-lang.org/rust-by-example/std_misc.html)\nprovides asynchronous operations with threads, channels and file I/O\n\n- Book: [Hands-on Rust: Effective Learning through 2D Game Development and\nPlay](https://pragprog.com/titles/hwrust/hands-on-rust/)\n\n- Tutorial: [Are we game yet?](https://arewegameyet.rs/resources/tutorials/)\n\n- Use case: [Web server with\nrocket.rs](https://rocket.rs/v0.5-rc/guide/quickstart/#running-examples)\n\n\nHere are a few more exercises and ideas for additional learning:\n\n1) The Rust compiler might have created warnings that need to be addressed.\nFollow the instructions from the `cargo build` commands and check the Git\ndiff. \n\n\n```\n\ncargo fix --bin \"learn-rust-ai\"\n\n\ngit diff \n\n```\n\n\n2) [Error handling](https://doc.rust-lang.org/rust-by-example/error.html) is\nrequired when failure is detected, and the caller should know. Some errors\ncan be recovered from within the application, others require program\ntermination. \n\n\n3) The [`std` library](https://doc.rust-lang.org/rust-by-example/std.html)\nextends primitive types and makes programming more efficient. \n\n\n### Share your feedback\n\nWhich programming language are you learning or considering learning? Start a\nnew topic on our [community](/community/) forum or Discord and share your\nexperience.  
\n\n\nIf you are using Code Suggestions Beta with [GitLab Duo](/gitlab-duo/)\nalready, please share your thoughts and feedback [in this\nissue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[1159,2744,1035,9,766],{"slug":2782,"featured":6,"template":699},"learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","content:en-us:blog:learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","Learning Rust With A Little Help From Ai Code Suggestions Getting Started","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"_path":2788,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2789,"content":2795,"config":2801,"_id":2803,"_type":13,"title":2804,"_source":15,"_file":2805,"_stem":2806,"_extension":18},"/en-us/blog/lee-tickett-my-gitlab-journey",{"title":2790,"description":2791,"ogTitle":2790,"ogDescription":2791,"noIndex":6,"ogImage":2792,"ogUrl":2793,"ogSiteName":685,"ogType":686,"canonicalUrls":2793,"schema":2794},"From user, to advocate, to contributor: my GitLab journey","Three years (as a user and as a contributor) with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681735/Blog/Hero%20Images/cover_photo.jpg","https://about.gitlab.com/blog/lee-tickett-my-gitlab-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From user, to advocate, to contributor: my GitLab journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Lee Tickett\"}],\n        \"datePublished\": \"2020-11-13\",\n      }",{"title":2790,"description":2791,"authors":2796,"heroImage":2792,"date":2798,"body":2799,"category":1239,"tags":2800},[2797],"Lee Tickett","2020-11-13","{::options parse_block_html=\"true\" /}\n\n\n\n\nI have had a passion for technology since before I can remember. 
Thirteen\nyears ago I took the plunge, quit my day job, and started an IT development\nand support company called [Tickett Enterprises\nLimited](https://www.tickett.net). For the last three years, GitLab has been\na part of my journey.\n\n\n## 3 Years Ago \n\nWe were (and still are) using a helpdesk system we built ourselves. It does\nexactly what we need it to do - and any time it doesn’t, we change it. The\nmost important feature of the system is reporting. Specifically,\nfacilitating our monthly billing process; with a click of a button, we\ngenerate timesheets and invoices for all of our clients.\n\n\nThough I was aware of Git (and GitHub), I had not heard of GitLab. We were\nusing SVN in its most basic form (single repository for all projects and no\nbranching), with an integration so all commits would create notes in our\nhelpdesk.\n\n\n## 2.5 Years Ago\n\nWe decided that SVN was no longer fit for purpose. Our top issues were: \n\n* never knowing whether the code in our repository matched what was deployed\n\n* not being able to work collaboratively on projects\n\n* feature/knowledge limitations\n\n* Git was the industry standard \n\n\nWhile most of these issues were due to the way we were using SVN, we were\nkeen to adopt a more popular system. I don’t remember how I found GitLab,\nbut I did, and spun up a local on-prem instance of Community Edition (CE)\nusing separate projects/repositories and basic branching. If you are\nconsidering running a local instance, I recommend the [Bitnami\nappliance/.ova](https://bitnami.com/stack/gitlab).\n\n\nIt took some time to get used to local vs remote and to remember to push as\nwell as commit, but we picked it up pretty quickly.\n\n\n## 2 Years Ago\n\nWe wanted to use GitLab to help us improve our processes so we:\n\n* built a little UI for project creation (using the GitLab API). 
This\nensures new projects fit our naming standards, contain our standard template\nfiles, have our standard master/test/dev branches, contain the relevant\nmembers, and use our webhooks\n\n* recreated the helpdesk integration we had with SVN (every commit and\ncomment is replicated as a note on our helpdesk)\n\n* unaware of GitLab EE, we created a custom merge request approval process\nusing webhooks. Our master branch is always protected - a merge request\nrequires 2 approvals from 2 distinct reviewers (one for code and one for\nfunctionality)\n\n\n## 1.5 Years Ago\n\nA bit late to the party, but finally we set up the GitLab runner to automate\nour build, spin up our database, execute our unit tests and report test\ndetails and code coverage. GitLab CI for .NET was not as well documented as\nother use cases leading to a lot of trial and error when setting up the\nrunner.\n\n\nWe are using the Windows runner configured to use a standard shell (which I\nthink is no longer supported). We will either be moving to powershell on\nwindows or possibly using docker images. 
Here’s a sample .gitlab-ci.yml\n\n\n```yml\n\nstages:\n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.4\"\n  \nbuild:\n stage: build\n script:\n  - 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319\\nuget restore'\n  - '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\bin\\msbuild\" /t:Restore,Clean,ReBuild /t:Database:Publish /p:Configuration=Debug;Platform=\"Any CPU\" /p:SqlPublishProfilePath=Database.publish.xml'\n  - 'ping 192.168.99.99 -n 1 -w 10000 2>nul || type nul>nul'\n artifacts:\n  paths:\n   - Tests/bin/\n\ntest:\n stage: test\n script:\n  - 'c:\\GitLab-Runner\\opencover\\OpenCover.Console.exe -returntargetcode:1000 -filter:\"+[*]* -[nunit*]* -[*Tests*]*\" -register -target:\"C:\\Program Files (x86)\\NUnit.org\\nunit-console\\nunit3-console.exe\" -targetargs:\"Tests\\Tests.csproj --result=testresult.xml;transform=C:\\gitlab-runner\\nunit3-junit.xslt\"'\n coverage: '/^Visited Branches .*(\\(\\d+\\.?\\d*\\))/'\n dependencies:\n  - build\n artifacts:\n  reports:\n   junit: testresult.xml\n```\n\n\nWe were building another customization to allow us to search for code across\nall repositories. Unfortunately, we hit a limitation because the API did not\nallow searching anything but the default branch.\n\n\nAt this point, while Googling for help getting CI up and running, I learned\nthat GitLab is open-source. So I thought maybe I could extend the API to\nsupport searching any branch. This lead to [my first\ncontribution](https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/28069).\n\n\n## 1 Year Ago\n\nAt this point, I was completely new to all of the technologies, techniques,\nand best practices used by GitLab but found myself participating in my first\n[GitLab hackathon](https://about.gitlab.com/community/hackathon/). Somehow,\nI managed to take joint first prize!\n\n\nMy first few contributions were achieved by modifying my production GitLab\ninstallation (not ideal). 
So it was time to get the [GitLab Development Kit\n(GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) up and running.\nThis was certainly not without its challenges (many of which I suspect stem\nfrom me being in the minority of GitLab contributors running Windows).\n\n\nI have since contributed to the [GDK\nproject](https://gitlab.com/gitlab-org/gitlab-development-kit) and joined\nthe GDK office hour calls to help shape the way forward and resolve some of\nthe problems and frustrations.\n\n\nAt this point, I was learning a lot. Not just about the tools and languages\nbut about the best practices and work ethos within the GitLab team. Better\nyet, I was able to start taking some of these learnings back to the office.\n\n\n## 0.5 Years Ago\n\nI attended GitLab Commit - London 2019. This really helped to confirm my\nsuspicions; we are only scratching the surface of GitLab's capabilities.\n\n\nOn a few occasions, I wondered whether GitLab may not be a good fit for my\ncompany as I watched huge companies like Porsche and Goldman Sachs present.\nA [presentation](https://www.youtube.com/watch?v=t0Eh1sq9r5s) by Huss\nEl-Sheikh from startup 9fin helped ease my concerns.\n\n\nAround this time, I moved from Windows to Ubuntu to make it easier to work\nwith GDK.\n\n\nI continued to learn a lot from my contributions, feedback, and interactions\nwith the GitLab team, again applying what I could back in the office. Much\naround the languages/technologies I hadn’t previously worked with (namely\nruby, postgres and vue), but also other takeaways such as:\n\n* when carrying out code reviews ask questions rather than give instructions\n(“what do you think about x?” is more productive than “change this to y”)\n\n* GitLab CI is capable of automating a lot of what we currently do by hand\n(e.g. 
code review for best practices)\n\n* always try to add tests when making code changes\n\n\nI am a firm believer of documenting processes, decisions, and rationale.\nThere’s nothing worse than someone saying “we do it this way” without being\nable to back that up with reasoning. With that in mind, we implemented Merge\nRequest Templates to ensure our team was consistent in our approach to\ncoding, testing, and releasing.\n\n\nBy now our development team had plenty of experience with GitLab and we were\nstarting to move our support team over. To help our team leads monitor merge\nrequests, we adopted 2 simple departmental labels (`Support`/`Development`)\nand used our webhook engine to ensure every MR is automatically labelled.\n\n\n## Today / What’s Next\n\nIn preparation for a transition to .NET core, deprecation of the Windows\nshell runner and a desire to start testing our frontend (web), I started\nputting a CI script together using docker and the\nmcr.microsoft.com/dotnet/core/sdk:latest image. 
The .gitlab-ci.yml looks\nlike;\n\n\n```yml\n\nstages:  \n  - build\n  - test\n\nvariables:\n  CI_DEBUG_TRACE: \"false\"\n  ASSEMBLY_VERSION: \"1.0.1\"\n\nbuild:\n stage: build\n tags:\n  - docker\n script:\n  - 'dotnet build'\n\ntest:\n stage: test\n tags:\n  - docker\n script:\n  - 'nohup dotnet run --project Web &'\n  - 'apt-get update'\n  - 'apt-get install -y unzip'\n  - 'wget https://chromedriver.storage.googleapis.com/83.0.4103.14/chromedriver_linux64.zip'\n  - 'unzip chromedriver_linux64.zip -d ~/'\n  - 'rm chromedriver_linux64.zip'\n  - 'mv -f ~/chromedriver /usr/local/bin/chromedriver'\n  - 'chown root:root /usr/local/bin/chromedriver'\n  - 'chmod 0755 /usr/local/bin/chromedriver'\n  - 'wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'\n  - 'sh -c ''echo \"deb https://dl.google.com/linux/chrome/deb/ stable main\" >> /etc/apt/sources.list.d/google.list'''\n  - 'apt-get update'\n  - 'apt-get install -y google-chrome-stable'\n  - 'dotnet test -l:trx Tests/Tests.csproj /p:CollectCoverage=true'\n coverage: '/Total\\s*\\|.*\\|\\s(\\d+\\.?\\d*)%\\s*\\|.*\\|/'\n```\n\n\nAnd the tests look something like;\n\n\n```c#\n    public class UiTests : IDisposable\n    {\n        private readonly Process _webServerProcess;\n        private readonly IWebDriver _driver;\n\n        [Fact]\n        public void ClickNavPrivacyPolicy()\n        {\n            _driver.Navigate()\n                .GoToUrl(\"http://localhost:5000/\");\n\n            var link = _driver.FindElement(By.LinkText(\"Privacy\"));\n            link.Click();\n\n            Assert.Equal(\"http://localhost:5000/Home/Privacy\", _driver.Url);\n        }\n\n        public UiTests()\n        {\n            ChromeOptions chromeOptions = new ChromeOptions();\n            chromeOptions.AddArguments(\"headless\", \"no-sandbox\");\n            _driver = new ChromeDriver(chromeOptions);\n\n            if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) return;\n\n            
_webServerProcess = new Process\n            {\n                StartInfo = {\n                    WorkingDirectory = Path.Combine(System.AppDomain.CurrentDomain.BaseDirectory, \"..\", \"..\", \"..\", \"..\", \"Web\"),\n                    FileName = $\"dotnet.exe\",\n                    Arguments = \" run\",\n                    UseShellExecute = true,\n                }\n            };\n            _webServerProcess.Start();\n        }\n\n        private void KillWebServer()\n        {\n            if (_webServerProcess != null && !_webServerProcess.HasExited)\n            {\n                _webServerProcess.Kill();\n            }\n        }\n\n        public void Dispose()\n        {\n            _driver.Dispose();\n            KillWebServer();\n        }\n    }\n```\n\n\nYou can see some conditional code in there which allows Selenium tests to\nwork both locally on our development machines and remotely on our GitLab\nrunner. If you have a better way of achieving this, please leave a comment.\nI would love to chat and learn!\n\n\nI also want to start introducing some linting like we see in the GitLab\nproject to enforce rules around code formatting (spaces, carriage returns,\nindentation, etc.). I have started to look at JetBrains Resharper (R#)\ncommand-line but haven’t had enough time to implement it yet. Ideally. I\nwould like to start with just a rule or two and then slowly introduce more,\nbut it looks quite tricky to take this approach. Please let me know if\nyou’ve been able to achieve this!\n\n\nI would also like to lose our helpdesk and start using GitLab issues,\nservice desk, timelogs, etc. I am working on identifying the gaps and\nworking with the product managers to understand whether it is realistic to\nfill those gaps within the GitLab product. 
Alternatively, I will be looking\nto build some additional “bolt-ons” using webhooks and the API.\n\n\nWhile investigating gaps, I stumbled upon the [GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage) and I expect we'll use\nthis to automate various workflows. I managed to help close a few issues and\neven create a few additional features which would make it work for us by\n[contributing to the GitLab-Triage\nproject](https://gitlab.com/gitlab-org/gitlab-triage/-/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett).\n\n\nWe also added more labels (`needs code review` & `needs functional review`)\nfor our merge request approval process now. We can see where we are and what\nneeds to be done at a glance. We previously relied on an MR checklist that\nwe are deprecating.\n\n\n![Merge request\nchecklist](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/mr_checklist.png)\n\n\n![Merge requests with\nlabels](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/merge_requests_with_labels.png)\n\n\n## Contributing to GitLab \n\n\nI am very proud to have joined the GitLab Core Team. Thanks to everyone who\nhas held my hand and patiently assisted me with contributions. \n\n\nWith the release of Microsoft Windows Subsystem for Linux v2, I have gone\nback to running Windows on my laptop with GDK running in Ubuntu on WSL2.\nThis is working brilliantly for me at the moment (the way Visual Studio Code\nhandles things especially is really cool).\n\n\nI now have 95 [merged merge\nrequests!](https://gitlab.com/dashboard/merge_requests?scope=all&utf8=%E2%9C%93&state=merged&author_username=leetickett)\nand have been helping several others get started contributing (getting GDK\nup and running etc). Once this crazy pandemic is over and we can start to\nsocialise again, I would like to try and start some sort of local\nmeetup/group.\n\n\nI would like to help make it easier to connect GitLab users. 
I have visions\nof a mechanism to search for others based on:\n\n* the size of their user base \n\n* the languages they are using\n\n* the feature they are using\n\n\nAt present, we have several tools (Gitter, Issues, Forum etc) but there is a\nstrong reliance on being engaged and stumbling on questions/support\nrequests. I suspect many of us would be happy to have other users reach out\ndirectly.\n\n\nIf you need any more information around:\n\n* getting your development environment/tools setup on Windows 10\n\n* getting CI working with .NET and SQL Server projects\n\n* building customisations using GitLab webhooks and API\n\n\n...or would like to see a demo of anything discussed above, I would be happy\nto oblige!\n\n\nI would love to connect with others who are either looking to, or already\nusing GitLab for:\n\n* .NET projects\n\n* customer helpdesk \n\n* customer billing (using timelogs)\n\n\nThanks for reading! Here's a picture of me and the family repping with our\nGitLab merch!\n\n\n![The tickett family repping\nGitLab](https://about.gitlab.com/images/blogimages/lee-tickett-my-gitlab-journey/landing_page.png)\n",[108,268,1364,9,721,722,696],{"slug":2802,"featured":6,"template":699},"lee-tickett-my-gitlab-journey","content:en-us:blog:lee-tickett-my-gitlab-journey.yml","Lee Tickett My Gitlab Journey","en-us/blog/lee-tickett-my-gitlab-journey.yml","en-us/blog/lee-tickett-my-gitlab-journey",{"_path":2808,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2809,"content":2815,"config":2820,"_id":2822,"_type":13,"title":2823,"_source":15,"_file":2824,"_stem":2825,"_extension":18},"/en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"title":2810,"description":2811,"ogTitle":2810,"ogDescription":2811,"noIndex":6,"ogImage":2812,"ogUrl":2813,"ogSiteName":685,"ogType":686,"canonicalUrls":2813,"schema":2814},"Lessons in iteration from a new team in infrastructure","A new, small team at GitLab discovered that minimum viable change applies to scaling problems 
too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681724/Blog/Hero%20Images/skateboard-iteration.jpg","https://about.gitlab.com/blog/lessons-in-iteration-from-new-infrastructure-team","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lessons in iteration from a new team in infrastructure\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sean McGivern\"}],\n        \"datePublished\": \"2020-11-09\",\n      }",{"title":2810,"description":2811,"authors":2816,"heroImage":2812,"date":2817,"body":2818,"category":832,"tags":2819},[2471],"2020-11-09","\n\nThe [Scalability Team][scalability] has the goal of understanding\npotential scaling bottlenecks in our application. We formed a year ago\nwith one person, and as of early 2020, we are made up of three backend\nengineers, plus one site reliability engineer. We are a\nsort of [program team] so we have a wide remit, and there's only one\nsimilar team at GitLab: our sibling [Delivery Team][delivery]. All of\nthe backend engineers in the team (including me) came from\nworking on product development rather than infrastructure work.\n\n[scalability]: /handbook/engineering/infrastructure/team/scalability/\n[program team]: https://lethain.com/programs-owning-the-unownable/\n[delivery]: /handbook/engineering/infrastructure/team/delivery/\n\nWe recently finished a project where we [investigated our use of\nSidekiq][sidekiq] and made various improvements. We decided to continue\nthe same approach of looking at services, and got started with our next\ntarget of Redis. Here are some lessons we took away:\n\n[sidekiq]:/blog/scaling-our-use-of-sidekiq/\n\n## 1. Don't lose sight of what matters most: impact\n\nWe chose to split our work on Redis into three phases:\n\n1. [Visibility][v]: increase visibility into the service.\n2. 
[Triage][t]: use our increased visibility to look for problems and\n   potential improvements, and triage those.\n3. [Knowledge sharing][ks]: share what we learned with the rest of the\n   Engineering department.\n\n[v]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[t]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/309\n[ks]: https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/265\n\n[Iteration] is crucial at GitLab, so much so that we have regular\n[Iteration Office Hours]. On the surface, you could say that we were\niterating here: our issues were small and well-scoped and we were\ndelivering code to production regularly.\n\n[Iteration]: https://handbook.gitlab.com/handbook/values/#iteration\n[Iteration Office Hours]: /handbook/ceo/#iteration-office-hours\n\nThe problem, as it turned out, was that we were focused so heavily on\nunderstanding the service, that we lost track of the [results] we were\ntrying to deliver. Our [values hierarchy] puts results at the top, but\nwe hadn't given the results enough attention. We are a small team that\nneeds to cover a wide area, and we need to deliver _impactful_ changes.\n\n[results]: https://handbook.gitlab.com/handbook/values/#results\n[values hierarchy]: https://handbook.gitlab.com/handbook/values/#hierarchy\n\nThere are some [examples in our handbook][impact] – which we've added as\na result of this project – but we define impact as either having a\ndirect effect on the platform, our infrastructure, or our development\nteams. That was what was missing here, because the impact was loaded\ntowards the very end of the project: largely in the knowledge sharing\nsection.\n\n[impact]: /handbook/engineering/infrastructure/team/scalability/#impact\n\nWe spent a long time (several months) improving our visibility, which\ndefinitely has a positive impact on our SREs who spend time\ninvestigating incidents. 
But we could have delivered this value and more\nin a shorter time period, if we had kept clear sights on the impact we\nwanted to have.\n\n## 2. Minimum viable change applies to scaling problems too\n\nWith that framing in mind, it's quite clear that we weren't iterating in\nthe best way. To use a famous example, it's like we'd started building a\ncar by building the wheels, then the chassis, etc. That takes a long\ntime to get something useful. We could have started by [building a\nskateboard]. We didn't have a good sense of what a [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc)\nwas for our team, so we got it wrong.\n\n[building a skateboard]: https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp\n\n![Building a skateboard iteration](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-skateboard.png){: .medium.center}\nIllustration by [Henrik Kniberg](https://blog.crisp.se/2016/01/25/henrikkniberg/making-sense-of-mvp)\n{: .note.text-right}\n\nWhat would a minimum viable change look like? When we worked on this project, we\ncovered several topics: adding Redis calls to our standard structured\nlogs, exposing slow log information, and so on. With hindsight, the best\nway would probably be to slice the project differently. 
We could take\nthe three steps above (visibility, triage, knowledge sharing), but\nconsider them all to be necessary for a project on a single topic with a\ntangible goal.\n\nWe did this, with all the impact at the end:\n\n![Working through the first step for all topics, the second step for all topics, and finally having impact in the third step](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-before.jpg)\n\nBut traveling in the other direction would have been much more\neffective:\n\n![Working through all steps for the first topic, having impact, then starting again at the second topic](https://about.gitlab.com/images/blogimages/scalability-redis-efficiency-after.jpg)\n\nThis leads to a state where:\n\n1. The impact we make is clearer.\n2. We start making an impact sooner.\n3. We can re-assess after every project, and stop early once we have\n   done enough.\n\nThe sooner we have this impact, the sooner we can see the results of\nwhat we've done. It's also good for morale to see these results on a\nregular basis!\n\n## 3. Shape your projects to deliver impact throughout\n\nThe way that we originally structured our work to improve Redis usage made it harder to see\nour impact than it should have been. For example, we [updated our\ndevelopment documentation][dev-docs-update] at the end of the project.\nThis was useful, but it would have been much more useful to backend\nengineers if we'd updated the documentation along the way, so they always had the best information we could give them.\n\n[dev-docs-update]: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/41889\n\nFor a more positive example: in the visibility stage, we created\na couple of issues directly for stage groups to address, rather than\nwaiting for the triage or knowledge sharing stage to do so. One of those\nissues was about [large cache entries for merge request\ndiscussions][mr-cache]. 
By getting this in front of the relevant\ndevelopment team earlier, we were able to\nget the fix scheduled and completed sooner as well.\n\n[mr-cache]: https://gitlab.com/gitlab-org/gitlab/-/issues/225600\n\nRegularly delivering projects with clear impact means that we get\nfeedback earlier (from engineers in Development and Infrastructure, or\nfrom the infrastructure itself), we can cover a wider area in less time,\nand we are happier about the work we're doing.\n\nAs people who went from working directly on user-facing features to\nworking on a property of the system as a whole, we learned that we can\nstill set ourselves an MVC to keep us on the right path, as long as we\nthink carefully about the results we want to achieve.\n\n[Cover image](https://unsplash.com/@viniciusamano?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) by shawn henry on [Unsplash](https://unsplash.com/s/photos/skateboard?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText)\n{: .note}\n",[790,1074,9],{"slug":2821,"featured":6,"template":699},"lessons-in-iteration-from-new-infrastructure-team","content:en-us:blog:lessons-in-iteration-from-new-infrastructure-team.yml","Lessons In Iteration From New Infrastructure Team","en-us/blog/lessons-in-iteration-from-new-infrastructure-team.yml","en-us/blog/lessons-in-iteration-from-new-infrastructure-team",{"_path":2827,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2828,"content":2834,"config":2840,"_id":2842,"_type":13,"title":2843,"_source":15,"_file":2844,"_stem":2845,"_extension":18},"/en-us/blog/low-code-no-code",{"title":2829,"description":2830,"ogTitle":2829,"ogDescription":2830,"noIndex":6,"ogImage":2831,"ogUrl":2832,"ogSiteName":685,"ogType":686,"canonicalUrls":2832,"schema":2833},"The role low code app development tools may play at GitLab","Citizen developers are creating code without being coders. 
CEO Sid Sijbrandij and senior PMM Parker Ennis explain the impact of low code app development tools on GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681170/Blog/Hero%20Images/lowcodenocode.jpg","https://about.gitlab.com/blog/low-code-no-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The role low code app development tools may play at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-03-26\",\n      }",{"title":2829,"description":2830,"authors":2835,"heroImage":2831,"date":2837,"body":2838,"category":718,"tags":2839},[2836],"Valerie Silverthorne","2020-03-26","\n\nIf software is eating the world and there is a [worldwide shortage of software developers](https://www.icims.com/hiring-insights/for-employers/how-to-win-tech-talent), how can companies stay in the game?\n\nOne answer: The [citizen developer](https://www.forbes.com/sites/johneverhard/2019/01/22/the-pros-and-cons-of-citizen-development/#2376328184fd). Empowered by technology, the so-called citizen developer is able to create code without a formal developer background. Two types of tools allow this: Low code app development tools let a citizen developer build apps using only the most rudimentary of coding skills, while no-code solutions are generally WYSIWYG choices that allow someone to create an app, or part of an app, using pre-assembled pieces of code.\n\nLow code and no code tools have been available for a long time – 4GL, computer-assisted software engineering (CASE) and rapid application development (RAD) tools were all precursors – and according to [IDC](http://www.idc.com), today their use is on the rise. In fact out of 23.4 million developers worldwide in 2019, IDC said 1.76 million of them are low coders, representing 7.5% of the total. 
There were also 810,000 no-code developers worldwide last year, according to IDC’s Market Perspective: Low-Code and No-Code Developer Census, 2019: Growth Begins in Earnest report.\n\nGiven the growing popularity, it’s not surprising the GitLab development team is taking a hard look at [how to leverage and/or integrate low code functionality](https://gitlab.com/groups/gitlab-org/-/epics/2353#note_263252013) into our tool. Recently CEO [Sid Sijbrandij](/company/team/#sytses) sat down with senior product marketing manager [Parker Ennis](/company/team/#parker_ennis) to talk about the role low code solutions can and should play at Gitlab.\n\n“So what I like about low code is the potential to have more people programming,” Sid tells Parker. And Parker is definitely enthusiastic as well. “What interests me in low code is providing the ease of getting into something like coding,” he explains. “There’s a high barrier of entry when it comes to programming. I found that first hand when I was an undergrad trying to learn Ruby on Rails. It was an intimidating, tough experience but for other people it’s something innate inside them. One of the really cool benefits of low code is you can have people starting to learn how to code without the intimidating factor.”\n\nAlso there’s no question there are simply not enough people with coding skills to fill the demand for software, Parker says, pointing to data from industry analyst and blogger [James Governor](https://redmonk.com/jgovernor/author/jgovernor) who says the world will need around 100 million developers in 10 years. Remember, we’re at just one quarter of that today.\n\nParker is particularly excited about the potential of low code tools to get kids interested in programming at an early age. “How can we educate the next generation in how to solve the problems we are creating today?” he asks. 
“Low code is a viable option.”\n\nMeanwhile today at GitLab we’re looking at ways we can make it easier to integrate low code tools into our workflow, Parker says. We might go further than that if a viable open source low-code tool arrives on the market.\n\n**Learn more about app development tools:**\n\n[Unify your logs and metrics](/blog/unifylogsmetrics/)\n\n[Get the most out of performance testing](/blog/how-were-building-up-performance-testing-of-gitlab/)\n\n[Up your merge train game](/blog/all-aboard-merge-trains/)\n\nCover image by [Anas Alshanti](https://unsplash.com/@otenteko) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[232,790,9],{"slug":2841,"featured":6,"template":699},"low-code-no-code","content:en-us:blog:low-code-no-code.yml","Low Code No Code","en-us/blog/low-code-no-code.yml","en-us/blog/low-code-no-code",{"_path":2847,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2848,"content":2853,"config":2858,"_id":2860,"_type":13,"title":2861,"_source":15,"_file":2862,"_stem":2863,"_extension":18},"/en-us/blog/manage-agile-teams-with-microservices",{"title":2849,"description":2850,"ogTitle":2849,"ogDescription":2850,"noIndex":6,"ogImage":1854,"ogUrl":2851,"ogSiteName":685,"ogType":686,"canonicalUrls":2851,"schema":2852},"How to manage Agile teams with microservices","GitLab Groups and Projects can help teams divide work by product or system.","https://about.gitlab.com/blog/manage-agile-teams-with-microservices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to manage Agile teams with microservices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-08-23\",\n      }",{"title":2849,"description":2850,"authors":2854,"heroImage":1854,"date":2855,"body":2856,"category":718,"tags":2857},[852],"2019-08-23","\n\nWe’re getting closer to the 2019 finish line, but there’s still time to jump on\nthe 
microservices train to accelerate your team’s delivery. We’ve written about\nmicroservices in the past, including discussing\n[best practices for microservices implementation](/blog/strategies-microservices-architecture/)\nand [GitLab’s integrated vision for microservices](/blog/microservices-integrated-solution/),\nbut I’m here to share something a little different: How you can use microservices\nto manage your team.\n\nBut first, a recap: Microservices is a collection of independently deployable\nservices that advances a goal, with each application managing a specific function\n_really_ well.\n\n> “The term ‘Microservice Architecture’ has sprung up over the last few years to\ndescribe a particular way of designing software applications as suites of\nindependently deployable services.” –\n[Martin Fowler](https://martinfowler.com/articles/microservices.html)\n\n## GitLab microservices for Agile team management\n\nUsing GitLab [Projects](https://docs.gitlab.com/ee/user/project/) and\n[Groups](https://docs.gitlab.com/ee/user/group/), teams can organize their work\nto increase visibility and collaboration. GitLab supports Agile teams by providing\n[Milestones](https://docs.gitlab.com/ee/user/project/milestones) (or sprints),\n[Issues](https://docs.gitlab.com/ee/user/project/issues/) (or user stories),\n[Weights](https://docs.gitlab.com/ee/user/project/issues/issue_weight.html) (or points and estimation),\nand other common [Agile artifacts](/blog/gitlab-for-agile-software-development/).\n\nHere are a few ways to use groups and projects:\n\n### Organizing your team by system\n\nOne of the more traditional ways to divide work, organizing by system separates\nteams by component and subsystem. For example, the teams that handle mobile iOS,\nmobile Android, and website have different projects, each with their own code\nrepo and [issue tracker](https://docs.gitlab.com/ee/user/project/issues/). 
This\ntype of structure works well with operations-driven organizations, but it’s not\na modern approach, so we recommend one of the following structures instead.\n\n### Organizing your team by product area\n\nDividing work by product is a best practice that drives business value. Using\nGitLab Groups, you can create `Code` and `Teams`. Within `Code`, separate projects\nrepresent various components (e.g. mobile iOS and user accounts), with individual\ncode repositories and sets of [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/).\nOnce you’ve created your projects (and code repos), you can build another group\nfor `Teams`, which includes fullstack product teams (i.e., engineers, PMs, designers),\nenabling parallel milestones and Agile boards. The benefit of organizing work by\nproduct area is that there’s a separation between code repos and work, so that\nevery piece of code in your organization is open to contributions from all teams.\n\n### Organizing your team with a hybrid approach\n\nThis approach combines both product and system organization structures and is\nwell suited for organizations that have cross-platform teams. For example, a mobile\nteam has dedicated iOS and Android engineers rather than full teams for both\nplatforms. In this model, the `Code` group will have individual projects according\nto component, but `Teams` is consolidated so that there’s only a website and mobile\nteam.\n\nWatch this demo and check out its\ncorresponding [example application](https://gitlab.com/trustful-finance-demo) to see groups and projects in action. 🍿\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VR2r1TJCDew\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nDoes your team use microservices for Agile development? 
We’d love to hear your\nthoughts.\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note}\n",[744,696,722,9],{"slug":2859,"featured":6,"template":699},"manage-agile-teams-with-microservices","content:en-us:blog:manage-agile-teams-with-microservices.yml","Manage Agile Teams With Microservices","en-us/blog/manage-agile-teams-with-microservices.yml","en-us/blog/manage-agile-teams-with-microservices",{"_path":2865,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2866,"content":2872,"config":2878,"_id":2880,"_type":13,"title":2881,"_source":15,"_file":2882,"_stem":2883,"_extension":18},"/en-us/blog/marker-io-gitlab-integration",{"title":2867,"description":2868,"ogTitle":2867,"ogDescription":2868,"noIndex":6,"ogImage":2869,"ogUrl":2870,"ogSiteName":685,"ogType":686,"canonicalUrls":2870,"schema":2871},"How to radically simplify bug reporting in GitLab","Marie Hargitt from Marker.io shares how product teams can empower colleagues to report actionable issues in GitLab, without driving developers crazy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679899/Blog/Hero%20Images/gitlab-marker-io.png","https://about.gitlab.com/blog/marker-io-gitlab-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to radically simplify bug reporting in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Marie Hargitt\"}],\n        \"datePublished\": \"2019-01-09\",\n      }",{"title":2867,"description":2868,"authors":2873,"heroImage":2869,"date":2875,"body":2876,"category":832,"tags":2877},[2874],"Marie Hargitt","2019-01-09","\n\nIf you’re like us, you’re constantly pushing out new features and improvements to your product, but with those updates and changes comes the inevitable risk of bugs. 
The best way to find and fix those bugs are your internal reporters and developers, but getting the whole team to report bugs into GitLab can be hard.\n\nWhether it’s your copywriters on the lookout for wonky content, your QA testers that find a broken form, designers that spot a font size five times too big, or your customer support team receiving word that a billing issue is blocking customers from paying – reporters can take forever to send actionable feedback to developers, who in turn don’t always get the information they need to smash those bugs.\n\n## What a bug-reporting workflow usually looks like ...\n\n### ... for reporters\n\nBecause reporters aren’t always super tech-savvy, it can be tricky for them to share reports that are helpful for your developers. The process is long, complicated, and tracking down the crucial technical information isn’t always easy.\n\nIn most teams, reporting bugs into GitLab looks like this:\n1. Find the bug.\n1. Open screenshot tool, capture bug.\n1. Open software to annotate screenshot, add comments.\n1. Open and log into GitLab.\n1. Select the correct project.\n1. Create new issue.\n1. Document the bug. (How exactly do I do this!?)\n1. Add technical information. (What is this even?)\n1. Attach screenshots.\n1. And then finally: submit report.\n\nThat’s a whopping 10 steps to report even the smallest bugs.\n\nAnd we didn’t even mention the super-fun scavenger hunt reporters have to go on to identify all of the environmental data developers need to even start thinking about fixing the bugs.\n\n### ... 
for developers\n\nDevelopers get feedback flying at them in all forms – emails, phone calls, sticky notes and screenshots.\n\nThey’re ready to gouge their eyes out because they can’t reproduce the reported bugs, because they’re not receiving actionable feedback from the get-go, and they don’t have time to investigate all the bug reports they receive.\n\n## So what can you do to make sure everyone can contribute?\n\n### Speed up workflow for reporters\n\nWe created Marker.io to speed up and simplify your team bug reporting. Now, those 10 steps are only three:\n\n1. Capture and annotate screenshot of bug.\n1. Send bug reports straight to your GitLab project.\n1. Keep hunting for more bugs!\n\nOne real-life example is an issue we ran into with our pricing page a while back. During our QA process, we noticed a weird bug: the price for our Team Plan was mysteriously missing. Instead of using the lengthy process mentioned earlier in this post, we used Marker.io to quickly send feedback to our dev team and get the bug fixed in no time.\n\nThis is what reporting the issue with Marker.io looked like:\n\n![Creating the bug report issue in GitLab](https://about.gitlab.com/images/blogimages/GitLab-creating-issue-Marker-io.gif){: .shadow.center.medium}\n\nNow, not only is the process much faster, but you never have to leave your website, there is nothing to configure, and all the technical data the developers need is automatically captured by Marker.io.\n\n### Create actionable reports for your developers\n\nOnce a visual feedback tool like Marker.io is introduced into the equation your developers can choose where they receive feedback, down to the specific bug-tracking GitLab project, and the important technical data they need is automatically grabbed and included in every bug report.\n\nThat means environment data, including:\n- Browser\n- Operating system (OS) and version\n- Screen size\n- Zoom level\n- Pixel ratio\n\nHere’s an example of what a Marker.io bug report looks 
like in GitLab:\n\n![The bug report issue inside GitLab](https://about.gitlab.com/images/blogimages/GitLab-issue-created-with-Marker-io.gif){: .shadow.center.medium}\n\nThis GitLab issue has all the information needed for your developers to act on it:\n\n- The issue is in the correct project.\n- Any pre-set epics, milestones or labels are included.\n- The issue is assigned to a team member.\n- The annotated screenshot is attached.\n- The expected and actual results are well documented.\n- The steps to reproduce are detailed.\n- The technical environment information is all there.\n- The issue has the URL where the screenshot was captured.\n- The issue has a due date.\n\nNo more wasted time following up with reporters to fill in the gaps. It’s all there, organized directly in your chosen GitLab project – complete with everything vital to fix your bugs.\n\nWant to try for yourself? Marker.io comes with a free 15-day trial. Give it go ➡️ [Marker.io/gitlab](https://marker.io/gitlab?utm_source=gitlab&utm_medium=post&utm_campaign=gitlab_bug_reporting)\n\n### About the guest author\n\nMarie Hargitt is the Marketing Manager of [Marker.io](https://marker.io/gitlab), a powerful tool that makes bug reporting and visual feedback easy for the whole team.\n",[696,232,9],{"slug":2879,"featured":6,"template":699},"marker-io-gitlab-integration","content:en-us:blog:marker-io-gitlab-integration.yml","Marker Io Gitlab Integration","en-us/blog/marker-io-gitlab-integration.yml","en-us/blog/marker-io-gitlab-integration",{"_path":2885,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2886,"content":2892,"config":2898,"_id":2900,"_type":13,"title":2901,"_source":15,"_file":2902,"_stem":2903,"_extension":18},"/en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"title":2887,"description":2888,"ogTitle":2887,"ogDescription":2888,"noIndex":6,"ogImage":2889,"ogUrl":2890,"ogSiteName":685,"ogType":686,"canonicalUrls":2890,"schema":2891},"Meet GitLab Duo Workflow - the 
future of AI-driven development","Workflow, our autonomous AI agent, will transform the way teams build and ship software – our first bold step towards AI-driven DevSecOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Meet GitLab Duo Workflow - the future of AI-driven development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David DeSanto, Chief Product Officer, GitLab\"}],\n        \"datePublished\": \"2024-06-27\",\n      }",{"title":2887,"description":2888,"authors":2893,"heroImage":2889,"date":2895,"body":2896,"category":764,"tags":2897},[2894],"David DeSanto, Chief Product Officer, GitLab","2024-06-27","Imagine if software wrote itself. It seems like a distant future, but with ongoing advancements in large language models and GitLab’s unified AI-powered DevSecOps platform, that future is quickly coming into focus. During our [GitLab 17 launch event](https://about.gitlab.com/seventeen/), we announced GitLab Duo Workflow, an autonomous AI agent that will transform how teams build, secure, deploy, and monitor software.\n\nGitLab Duo Workflow moves beyond the current landscape of reactive, prompt-based AI assistants by creating an autonomous team member actively contributing to optimize every aspect of your software development lifecycle. Workflow distinguishes itself by leveraging GitLab’s unified data store, which seamlessly connects all relevant data, projects, repositories, and documentation. 
This allows Workflow to be an intelligent, always-on agent that constantly monitors your projects, anticipates potential production issues, automatically identifies and resolves vulnerabilities, optimizes your applications for peak performance, and streamlines onboarding by rapidly building customized remote development environments.\n\nAI is transforming how secure software is created, maintained, updated, deployed, and monitored, enabling organizations to ship more software than ever before. GitLab Duo Workflow represents our first bold step towards AI-driven DevSecOps. We aim to empower developers to focus on high-level problem-solving, innovation, and value creation, while [GitLab Duo](https://about.gitlab.com/gitlab-duo/) handles repetitive tasks and optimization behind the scenes.\n\n## The vision for GitLab Duo Workflow\nWith GitLab Duo Workflow, we are laser-focused on tackling several key use cases to automate and optimize the software development process from end to end.\n### 1. Development automated\n\nStraight from the IDE, GitLab Duo Workflow helps plan and prioritize tasks tailored to individual projects and defined organizational processes. Using the requirements from a particular work item (whether an epic, issue, or task), Workflow produces an implementation plan that developers can review and refine. Then, Workflow works through the plan, generating or rewriting code to accomplish and meet the defined requirements. Workflow accomplishes this by operating within a [GitLab Remote Development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), allowing it to safely and securely evaluate, write, and test code changes. This also ensures that Workflow produces code that both meets the requirements as well as passes all CI pipeline tests, including security scans. 
If the pipeline fails, Workflow automatically addresses issues and troubleshoots as needed, ensuring only high-quality code that meets your organization's standards is created and committed to your projects.\n\nOnce ready, Workflow will automatically create a merge request outlining its code changes and go through your merge request approval processes, including engaging with the code reviewer or maintainer. You can even ask Workflow to review your code and have it comment on your merge requests just as a human code reviewer would today. Even better, Workflow will implement those suggestions for you if you'd like it to. And this is only the start.\n\n### 2. Intelligent continuous improvement\nGitLab Duo Workflow will analyze your codebase in real time, suggesting architectural optimizations for enhanced efficiency, performance, and cost savings. Furthermore, it will proactively identify opportunities for code refactoring to improve scalability and address technical debt by suggesting changes to developers or automatically implementing them in a sandbox environment. Additionally, Workflow will dynamically manage cloud resources to prevent overprovisioning and ensure your applications always meet their performance targets.\n\n### 3. Proactive security and compliance\nSecurity and compliance are top priorities for any organization. GitLab Duo Workflow will automatically ask developers to apply patches, refactor insecure code, and adapt to emerging threats in real time. Moreover, Workflow will continuously assess security risks associated with your applications and production environments and assist you with implementing mitigating controls.\n\n### 4. Self-optimizing performance\nGitLab Duo Workflow will incorporate sophisticated feedback loops for continuous learning and improvement. 
By analyzing data from monitoring tools, user interactions, and business outcomes, it will continuously refine its view of your codebase to ensure that your application architectures are always aligned with your business needs. As we see with all AI, Workflow will constantly improve, catching and fixing its own mistakes as it learns to be a partner in your organization.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/967982166?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allowfullscreen=\"true\" title=\"GitLab Duo Workflow the future of AI-driven DevSecOps\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## The future of AI is now\nGitLab Duo Workflow represents an exciting leap forward, transitioning from AI that requires constant human prompting to AI that drives development workflows and processes, seeking human guidance when needed. With GitLab's unified, AI-driven interface spanning the DevSecOps lifecycle, organizations will be able to create a new generation of AI-powered applications with unparalleled speed, efficiency, and innovation, all while maintaining the highest standards of security and compliance. No tradeoffs.\n\nStay tuned for more updates and insights as we continue to push the boundaries of what's possible with AI in software development. 
Together, let's embrace the future of AI-driven DevSecOps and unlock the full potential of your teams and organizations.\n\n> If you are curious about AI-driven DevSecOps and want to be part of this journey, including access to the pre-release program, please [sign up for our GitLab Duo Workflow waitlist](https://forms.gle/5ppRuNVb8LwSPNVJA).",[766,495,1159,1014,9],{"slug":2899,"featured":90,"template":699},"meet-gitlab-duo-workflow-the-future-of-ai-driven-development","content:en-us:blog:meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","Meet Gitlab Duo Workflow The Future Of Ai Driven Development","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development.yml","en-us/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development",{"_path":2905,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2906,"content":2912,"config":2918,"_id":2920,"_type":13,"title":2921,"_source":15,"_file":2922,"_stem":2923,"_extension":18},"/en-us/blog/meltano-functional-group-update-post",{"title":2907,"description":2908,"ogTitle":2907,"ogDescription":2908,"noIndex":6,"ogImage":2909,"ogUrl":2910,"ogSiteName":685,"ogType":686,"canonicalUrls":2910,"schema":2911},"New Meltano personas, priorities, and updates from the team","There's a lot going on — here are some of the highlights on user research, dogfooding Meltano, embedding engineers, and hiring!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678847/Blog/Hero%20Images/meltano-fgu.jpg","https://about.gitlab.com/blog/meltano-functional-group-update-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"New Meltano personas, priorities, and updates from the team\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacob Schatz\"}],\n        \"datePublished\": \"2018-10-08\",\n      
}",{"title":2907,"description":2908,"authors":2913,"heroImage":2909,"date":2915,"body":2916,"category":832,"tags":2917},[2914],"Jacob Schatz","2018-10-08","\nJacob Schatz here, Staff Engineer for [Meltano](https://gitlab.com/meltano)! We've been heads down working on improving Meltano, and figured it was time for an update. We've had some great conversations that have helped us identify two general personas. Our team is also growing, and we're ready for frontend contributions, but more on that later.\n\nWe've been conducting interviews to zero in on what our users will want, what they're currently doing, and what tools they're using. Over the course of those conversations, we saw two main scenarios emerge. People either wanted a command line interface (CLI) or a graphical user interface (GUI). The GUIs that exist are painful to use, and not very intuitive. In both scenarios, people we spoke with are frustrated. This goes back to the original reason [we decided to create Meltano](/blog/hey-data-teams-we-are-working-on-a-tool-just-for-you/) — our data team members were relying on frustrating and expensive toolsets with poor UIs.\n\n### What are the Meltano personas?\n\nOur conversations revealed two general types of users:\n* Users who have engineers on staff\n* Users who do not have engineers on staff, or their engineers do not have bandwidth to help them\n\nThe Data team at GitLab, for example, has data engineers on staff who are willing, able, and happy to write Python. We won't be able to write every extractor and loader, so our users can follow our [specifications](https://gitlab.com/meltano/specifications), which are based off of the [Singer specifications](https://github.com/singer-io/getting-started). We want to make that as easy as possible, so Meltano can be the glue between all these different pieces.\n\nFor the other teams who don’t have the technical resources, we want to make it as if they had engineers on staff. 
Ideally, they'll just need to click a couple of buttons, run extract, load and transform with the extractors and loaders that we already have. Hopefully in the future the community can contribute more to these types of different extractors and loaders.\n\nYou can check out our updated [readme](https://gitlab.com/meltano/meltano/blob/master/README.md) with more info about Meltano and our personas. We're working iteratively, so if you have a different setup or scenario to share, we want to hear from you about your experience! Get in touch with us and tell us about your struggles or successes with your data team.\n\n### What’s next?\n\nWe're focused on our own CLI and GUI, and continuing to build more extractors and loaders (or [\"taps and targets\"](https://www.singer.io/)). We will be the glue that ties everything together. While current Singer taps and targets support extracting and loading, we'll be supporting much more, like removal of PII. Our CLI will support all of this from one configuration. We also want the CLI to have a really nice user experience, so I'm working with GitLab UX to help make it happen.\n\nAs always, we’re looking for contributors! In the [Dashboard project](https://gitlab.com/meltano/dashboard) you’ll see the Chart.js library that I’m building to make really nice dashboards for Meltano. Although we've had a ton of great Python contributions, we haven’t had as many contributors to the frontend, so we’d love your help there.\n\n### In other news\nThere's a lot going on, here are some of the highlights!\n\n#### Dogfooding\nIn my experience, unless one experiences the direct results of the code they write, and feel the pain their users feel when they hit a bug, one might not correctly solve the problem. Currently, we fulfill the data team's requests, but if something doesn't work they merely report back to us, without us experiencing the pain ourselves. 
We're changing how we work in order to imprint the idea that if something is broken, it's the Meltano team's responsibility. We’re all investigating every single pipeline failure, regardless of whose “fault” it is, because these suggest that it may be a poor user experience.\n\n#### Embedded engineers\nIn order to dogfood better, we've taken a data engineer from the data team, and an engineer from the Meltano team. They split their work 50/50 so each does half of their usual work and half of each other's work. It's already made a huge difference by giving us more eyes and ears on lots of issues, and allowing the engineers to approach problems from a different angle. Another added benefit is that every Meltano engineer gets direct exposure and experience from the data team, to make them better data scientists as well product engineers.\n\nThat's all for now, get in touch with us in our [issue tracker](https://gitlab.com/groups/meltano/-/boards), and tweet us [@meltanodata](https://twitter.com/meltanodata)!\n\nCover [image](https://unsplash.com/photos/2FPjlAyMQTA) by [John Schnobrich](https://unsplash.com/@johnschno) on Unsplash\n{: .note}\n\n[Emily von Hoffmann](https://about.gitlab.com/company/team/#emvonhoffmann) contributed to this post.\n{: .note}\n",[974,834,1684,790,9],{"slug":2919,"featured":6,"template":699},"meltano-functional-group-update-post","content:en-us:blog:meltano-functional-group-update-post.yml","Meltano Functional Group Update 
Post","en-us/blog/meltano-functional-group-update-post.yml","en-us/blog/meltano-functional-group-update-post",{"_path":2925,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2926,"content":2932,"config":2938,"_id":2940,"_type":13,"title":2941,"_source":15,"_file":2942,"_stem":2943,"_extension":18},"/en-us/blog/migrating-your-jira-issues-into-gitlab",{"title":2927,"description":2928,"ogTitle":2927,"ogDescription":2928,"noIndex":6,"ogImage":2929,"ogUrl":2930,"ogSiteName":685,"ogType":686,"canonicalUrls":2930,"schema":2931},"Migrating your JIRA issues to GitLab","We're migrating all of our working tools to open-source ones, and moving to GitLab has made all the difference.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667509/Blog/Hero%20Images/continuous-integration-from-jenkins-to-gitlab-using-docker.jpg","https://about.gitlab.com/blog/migrating-your-jira-issues-into-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating your JIRA issues to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abdulkader Benchi\"}],\n        \"datePublished\": \"2017-08-21\",\n      }",{"title":2927,"description":2928,"authors":2933,"heroImage":2929,"date":2935,"body":2936,"category":832,"tags":2937},[2934],"Abdulkader Benchi","2017-08-21","\n\n Here at [Linagora](https://linagora.com/), we believe in open source. If you have read my [last article](/blog/docker-my-precious/), you should know that we have recently migrated from [Atlassian](https://www.atlassian.com/) to [GitLab](https://gitlab.com/).\n\n\u003C!-- more -->\n\n_Editor's note: We don't currently have a native way to migrate JIRA issues into GitLab issues, although we are [working on one](https://gitlab.com/gitlab-org/gitlab-ee/issues/2780)! 
In the meantime, we are very appreciative of community efforts to provide workarounds like this one._\n\nMigrating our repositories from [Bitbucket](https://bitbucket.org/) to GitLab was so easy thanks to Git. However, migrating our issues (aka tickets) from [JIRA](https://www.atlassian.com/software/jira) to GitLab was not so obvious. In fact, there are several alternative solutions to integrate JIRA as a plugin inside GitLab so as to continue using JIRA along with GitLab. However, our main goal was to completely leverage GitLab as our only open-source development tool.\n\nIf you want to know how to migrate your JIRA issues into GitLab, then you are on the right article. Once you read it, you will discover that it is really so easy to do the migration from JIRA to GitLab. Yes, as you can see, winter is coming to GitLab rivals, because everything is possible with GitLab.\n\n### Migrating JIRA issues into GitLab Issues\n\nOur migration process will leverage the [REST APIs](http://www.restapitutorial.com/) provided by both [JIRA REST API](https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis) and GitLab Issues [REST API](https://docs.gitlab.com/ee/api/issues.html).\n\n#### API calls:\n\nTo perform REST API cals, you can use your own preferred library. For me, I will use [axios](https://github.com/mzabriskie/axios), which is my preferred promise based HTTP client for the browser and node.js. You can simply install it locally by doing:\n\n```\nnpm install axios\n\n```\n\n#### JIRA side:\n\nBefore requesting the endpoints provided by JIRA, we need to gather the following information:\n\n```\n// the base url to your JIRA\nconst JIRA_URL = 'https://your-jira-url.com/';\n\n// the JIRA project ID (short)\nconst JIRA_PROJECT = 'PRO';\n\n// JIRA username and password used to login\nconst JIRA_ACCOUNT = {\n  username,\n  password\n};\n\n```\n\nNow, we need to call two endpoints call during the migration process. 
The first endpoint is to get all **JIRA issues**:\n\n```\naxios.request({\n  method: 'get',\n  url: `${JIRA_URL}/rest/api/2/search?jql=project=${JIRA_PROJECT}+order+by+id+asc&startAt=${offset}&maxResults=${limit}`,\n  auth: {\n    username: JIRA_ACCOUNT.username,\n    password: JIRA_ACCOUNT.password\n  }\n})\n```\n\nThe second endpoint is to get the **attachments** and the **comments** related to a given issue:\n\n```\naxios.request({\n  method: 'get',\n  /*\n  * JIRA_ISSUE = the JIRA issue that we get from the previous call\n  */\n  url: `${JIRA_URL}/rest/api/2/issue/${JIRA_ISSUE.id}/?fields=attachment,comment`,\n  auth: {\n    username: JIRA_ACCOUNT.username,\n    password: JIRA_ACCOUNT.password\n  }\n})\n```\n\n#### GitLab side:\n\nAs for JIRA, we need to gather some information before starting sending REST requests:\n\n```\n// the base url to your GitLab\nconst GITLAB_URL = 'http://your-gitlab-url.com/';\n\n// the project in gitlab that you are importing issues to\nconst GITLAB_PROJECT = 'namespaced/project/name';\n\n// GitLab username and password used to login\nconst GITLAB_ACCOUNT = {\n  username,\n  password\n};\n\n/* this token will be used whenever the API is invoked and\n* the jira's author of (the comment / attachment / issue) is not a gitlab user.\n* So, this identity will be used instead.\n* GITLAB_TOKEN is visible in your account: https://ci.linagora.com/profile/account\n*/\nconst GITLAB_TOKEN = 'get-this-token-from-your-profile';\n```\n\nEach JIRA issue has several fields which represent JIRA users, e.g., *assignee* and *reporter*. Once migrating to GitLab we should try to link these users to GitLab users (if they already exist on GitLab). However, if the user is not a GitLab user, then we have to leverage the **GITLAB_TOKEN** (line 18 in the last gist). 
That is, if the user does not exist on GitLab, then the identity of the user who is doing the migration will be used instead.\n\nTo search all GitLab users we need to send the following REST call:\n\n```\naxios.request({\n  method: 'get',\n  // 10000 users, should be enough to get them all\n  url: `${GITLAB_URL}/api/v4/users?active=true&search=&per_page=10000`,\n  headers: {\n    'PRIVATE-TOKEN': GITLAB_TOKEN\n  }\n})\n```\n\nAnd now, we can find the corresponding GitLab user for each JIRA user by doing:\n\n```\nfunction jiraToGitlabUser(JIRAUser) {\n    // GitLabUsers = the list of GitLab users we get from the last call\n    return JIRAUser ? _.find(GitLabUsers, { email: JIRAUser.emailAddress }) : null\n  }\n```\n\nIt is worth noting that JIRA and GitLab issues are different in nature, so you need to migrate one type of issue to another. After searching all [JIRA issues](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a) and [JIRA attachments](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a) and comments, we can now transfer them into GitLab issues by doing the following mapping:\n\n```\n{\n    title: JIRAIssue.fields.summary,\n    description: JIRAIssue.fields.description,\n    labels: [JIRAIssue.fields.issuetype.name],\n    created_at: JIRAIssue.fields.created,\n    updated_at: JIRAIssue.fields.updated,\n    done: issue.fields.status.statusCategory.name === 'Done' ? 
true : false,\n    assignee: jiraToGitlabUser(JIRAIssue.fields.assignee ),\n    reporter: jiraToGitlabUser(JIRAIssue.fields.reporter),\n    comments: JIRAComments.map(JIRAComment => ({\n      author: jiraToGitlabUser(JIRAComment.author),\n      comment: JIRAComment.body,\n      created_at: JIRAComment.created\n    })),\n    attachments: JIRAAttachments.map(JIRAAttachment => ({\n      author: jiraToGitlabUser(JIRAAttachment.author),\n      filename: JIRAAttachment.filename,\n      content: JIRAAttachment.content,\n      created_at: JIRAAttachment.created\n    }))\n};\n```\n\nNow our GitLab issue is created, all what we need to do is to post it:\n\n```\naxios.request({\n  method: 'post',\n  url: `${GITLAB_URL}/api/v4/projects/${encodeURIComponent(GITLAB_PROJECT)}/issues`,\n  // the GitLab issue that we have just created\n  data: GITLAB_ISSUE\n  headers: {\n    'PRIVATE-TOKEN': GITLAB_TOKEN\n  }\n})\n```\n\nAs you can see, migrating your JIRA tickets to GitLab is all about some REST API calls. As a developer, I think that you do such REST API calls every day. So we really do not need to stuck with JIRA nor to add it as a plugin to GitLab.\n\nIf you think that this article helps you discover something interesting that you feel you want to do every day, so please do not hesitate and join us. We are looking for new talents. 
For more information, you can have a look at our [Job site](https://job.linagora.com/en/).\n\n\nThis post originally appeared on _[Medium](https://medium.com/linagora-engineering/gitlab-rivals-winter-is-here-584eacf1fe9a)_.\n\n### About the Guest Author\n\nAbdulkader Benchi is the Javascript team leader at [Linagora](https://linagora.com/careers).\n",[232,9],{"slug":2939,"featured":6,"template":699},"migrating-your-jira-issues-into-gitlab","content:en-us:blog:migrating-your-jira-issues-into-gitlab.yml","Migrating Your Jira Issues Into Gitlab","en-us/blog/migrating-your-jira-issues-into-gitlab.yml","en-us/blog/migrating-your-jira-issues-into-gitlab",{"_path":2945,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2946,"content":2952,"config":2957,"_id":2959,"_type":13,"title":2960,"_source":15,"_file":2961,"_stem":2962,"_extension":18},"/en-us/blog/migrating-your-version-control-to-git",{"title":2947,"description":2948,"ogTitle":2947,"ogDescription":2948,"noIndex":6,"ogImage":2949,"ogUrl":2950,"ogSiteName":685,"ogType":686,"canonicalUrls":2950,"schema":2951},"Migrating your version control to Git? Here’s what you need to know","Change is hard, but moving to Git doesn’t have to be if you read these tips.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681731/Blog/Hero%20Images/migrategit.jpg","https://about.gitlab.com/blog/migrating-your-version-control-to-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migrating your version control to Git? 
Here’s what you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-11-12\",\n      }",{"title":2947,"description":2948,"authors":2953,"heroImage":2949,"date":2954,"body":2955,"category":718,"tags":2956},[852],"2020-11-12","\n\nDeciding to make sweeping changes that’ll affect your entire organization is a nerve-wracking experience, because until you see the change in action, you don’t know whether it’ll be a success or a disaster. Migrating from one [version control](/topics/version-control/) to Git is just that type of change that can make team members and leaders feel overwhelmed. However, advanced knowledge helps teams prepare and transition more smoothly. Here are a few tips to help you make the change.\n\n## Keep your previous version control system\n\nIf you perform a tip migration and copy over only the most recent commits, teams will still need access to the previous version control to consult project history. Set the old version control system to read-only and place a breadcrumb trail in Git to help developers find the information they need in the previous version control. Retaining the old version control preserves history and enables new team members to find important information, which may be lost as veteran contributors move to different roles or forget code specifics.\n\n## Clone your previous version control\n\nBefore making a sudden shift to a new version control, create a mirror of your previous system to test out whether your current processes work with Git or you need to make adjustments. Continuous integration, code review, security testing, and release processes should all be tested with the clone so that the complications can be remedied before the entire workflow breaks down.\n\n## Invest in learning Git\n\nAlthough Git is the most popular and widely-used version control, it’s also known for its initial degree of difficulty. 
Developers who are new to Git may struggle with the command line and find [branching](https://learngitbranching.js.org/) tedious and confusing. Despite Git’s learning curve, its positive impact on productivity and code quality is worth the trouble, and teams can cope with these challenges by investing in training or identifying Git experts within the team to coach others. Team members may find it easier to work with a [GUI](https://git-scm.com/downloads/guis) rather than the command line, so using a strong tool could help ease the transition.\n\n## Identify a branching strategy\n\n![A diagram of colorful blocks representing code with connecting lines to represent branches and the flow](https://about.gitlab.com/images/blogimages/illustration_feature-branches.png){: .shadow.small.left.wrap-text}\n\nBefore [migrating to Git](https://git-scm.com/book/en/v2/Git-and-Other-Systems-Migrating-to-Git), it’s imperative to select a branching strategy and train the team on its specifics. Git is a distributed version control system and offers unparalleled workflow flexibility, which can either streamline or convolute development depending on whether a team identifies a single branching strategy. Without a strategy, team members could interfere with each other’s work and ship unfinished features. Collaborating through a single workflow keeps the codebase clean and helps team members maintain velocity. Git enables teams to approach software development through a variety of workflows to meet specific needs. Some branching strategies, such as [GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html), are more straightforward than others, so it’s important to research your team’s needs before deciding.\n\n## Read more about Git\n\nAccording to the [2020 DevSecOps Survey](/developer-survey/), Git is the choice for source control for 92% of the survey takers, with just 2% using no source control and even smaller percentages using Azure DevOps Server and Subversion. 
Here are few additional posts to help you get the most out of Git.\n\n- [15 Git tips to improve workflow](/blog/15-git-tips-improve-workflow/)\n- [How Git Partial Clone lets you fetch only the large file you need](/blog/partial-clone-for-massive-repositories/)\n- [A guide to Git for beginners](/blog/beginner-git-guide/) \n\nCover image by [Belinda Fewings](https://unsplash.com/@bel2000a?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/1Spvd7ktFX4)\n{: .note}\n",[1684,9,696],{"slug":2958,"featured":6,"template":699},"migrating-your-version-control-to-git","content:en-us:blog:migrating-your-version-control-to-git.yml","Migrating Your Version Control To Git","en-us/blog/migrating-your-version-control-to-git.yml","en-us/blog/migrating-your-version-control-to-git",{"_path":2964,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2965,"content":2970,"config":2975,"_id":2977,"_type":13,"title":2978,"_source":15,"_file":2979,"_stem":2980,"_extension":18},"/en-us/blog/modernize-your-ci-cd",{"title":2966,"description":2967,"ogTitle":2966,"ogDescription":2967,"noIndex":6,"ogImage":1171,"ogUrl":2968,"ogSiteName":685,"ogType":686,"canonicalUrls":2968,"schema":2969},"3 CI/CD challenges to consider","If these DevOps challenges hit close to home, the right CI/CD could be the answer.","https://about.gitlab.com/blog/modernize-your-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 CI/CD challenges to consider\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-05\",\n      }",{"title":2966,"description":2967,"authors":2971,"heroImage":1171,"date":2972,"body":2973,"category":718,"tags":2974},[1113],"2019-06-05","\n[Continuous integration and delivery](/solutions/continuous-integration/) helps DevOps teams ship higher quality software, faster. 
But is all [CI/CD](/topics/ci-cd/) created equal? What does successful CI/CD implementation look like and how do you know you’re on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. Today, we’ll focus on [DevOps](/topics/devops/) challenges and situations where a comprehensive CI/CD approach could be the answer you’ve been looking for.\n\nIf these problems hit a little too close to home, stay tuned for part two where we dive deeper into how these roadblocks impact the rest of the SDLC.\n\n## What challenges do I face?\n\n### 1. Maintenance and integration costs, predominantly human resources costs.\n\nA large percentage of the overall IT budget goes to support teams of engineers needed to integrate and maintain a complex toolchain. An enterprise company with 1,000 developers could need up to 40 engineers just to maintain the DevOps toolchain instead of allocating these resources towards delivering business value.\n\n### 2. Development is slowed/blocked by the operations team.\n\nThe quintessential challenge of the pre-DevOps world is that dev teams are incentivized to increase innovation velocity by shipping new features. Operations teams are incentivized for stability, uptime, and error reduction. The higher the development velocity, the greater the chance for downtime and errors – so these teams are naturally at odds with each other. Dev leaders don’t always have enough enticing evidence or incentive to go to the Ops team to advocate for increased deployment velocity, and vice versa.\n\n### 3. 
Developers doing ops.\n\nToday, teams and individual developers base the code they produce on the capabilities of their environment rather than the needs of the business.\n\n## What do these look like in practice?\n\n### A big portion of resources and budget goes to undifferentiated integration and maintenance.\n\nTeams are siloed by their tools – each team has their favorite and is optimized to work within these specialized tools only. It is difficult to collaborate and troubleshoot across the stack due to a lack of visibility.\n\n### Code sometimes never gets to production at all.\n\nThere is a delay between code being written and driving value. When problems or errors arise and need to be sent back to the developer, it becomes difficult to troubleshoot because the code isn’t fresh in their mind (context switching). They have to stop working on their current project and go back to the previous code to troubleshoot. So much time might have passed that the code is no longer deployable in its current state. In addition to wasting time and money, this is demoralizing for the developer who doesn’t get to see the fruit of their labor.\n\n### Developers worry about environments, not business logic.\n\nEnvironment dependencies and configuration distracts developers from tasks they’re better equipped to handle. They may even be spending time trying to decide what size VM they need to deploy to. In this order “DevOps” means “Developers have to do both dev and ops.” Only a small percentage of developers actually enjoy this arrangement with most asking, “I’m a developer, please stop asking me to do operations.”\n\nIf you’ve already implemented CI/CD but are still experiencing these roadblocks, it might be time to modernize your CI/CD. 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple .text-center}\n\nPhoto by [Jungwoo Hong](https://unsplash.com/photos/cYUMaCqMYvI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/arrow?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[722,108,9],{"slug":2976,"featured":6,"template":699},"modernize-your-ci-cd","content:en-us:blog:modernize-your-ci-cd.yml","Modernize Your Ci Cd","en-us/blog/modernize-your-ci-cd.yml","en-us/blog/modernize-your-ci-cd",{"_path":2982,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2983,"content":2989,"config":2995,"_id":2997,"_type":13,"title":2998,"_source":15,"_file":2999,"_stem":3000,"_extension":18},"/en-us/blog/move-to-distributed-vcs",{"title":2984,"description":2985,"ogTitle":2984,"ogDescription":2985,"noIndex":6,"ogImage":2986,"ogUrl":2987,"ogSiteName":685,"ogType":686,"canonicalUrls":2987,"schema":2988},"Why you should switch to distributed version control","We share a few reasons why high-performing software development teams use distributed version control systems over centralized version control.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681766/Blog/Hero%20Images/distributedvcs.jpg","https://about.gitlab.com/blog/move-to-distributed-vcs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why you should move from centralized version control to distributed version control\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-11-19\",\n      }",{"title":2990,"description":2985,"authors":2991,"heroImage":2986,"date":2992,"body":2993,"category":718,"tags":2994},"Why you should move from centralized version control 
to distributed version control",[852],"2020-11-19","\n\nDistributed version control has the power to increase collaboration and streamline development, but many teams are still using a centralized [version control system](/topics/version-control/) that prevents them from reaching their full development potential. If your team uses a centralized version control system, velocity, code quality, and collaboration aren’t at the same levels of high-performing teams that consistently deliver valuable products at rapid speeds. Using a [version control](/topics/version-control/) system isn’t enough to stay competitive in today’s market - you have to use the best tools available.\n\n## What is version control?\n\nVersion control lets software development teams build up communication and collaboration while continuously making and tracking changes to source code. Sometimes called code revision control, version control exists as a safety net to protect the source code while giving the development team the flexibility to experiment without worrying about causing damage or creating code conflicts. A version control system can be local, centralized, or distributed based on organizational needs.\n\n## Centralized version control: A relic from the past\n\nA centralized version control system relies on a central server where developers commit changes. Users like centralized systems, because they’re simple to set up and provide admins with workflow controls. Centralized vcs, like Subversion, CVS, and Perforce, solve the age-old problem of manually storing multiple copies on a hard drive, but the few benefits don’t outweigh what’s at risk from relying on a [single server](https://git-scm.com/book/en/v2/Getting-Started-About-Version-Control).\n\nIf the only copy of a project becomes corrupted or goes down, developers are unable to access the code or retrieve previous versions. 
Also, remote commits are extremely slow, because users must commit through a network to the central repository, which can slow down systems. Users must also be in network to push changes, limiting where and when developers can commit. Merging and branching are also difficult and confusing, since contributors have to track merges and branch as a single check-in.\n\n## Distributed version control: The key to rapid software development\n\nUnlike a centralized version control system, a distributed version control doesn’t have a single point of failure, because developers clone repositories on their distributed version control workstations, creating multiple backup copies. If the [source code](/solutions/source-code-management/) is corrupted, teams can use any developer’s clone as a backup, increasing security since there’s little risk of losing a project’s entire history. \n\nAlso, because there are local copies, developers can commit offline, which offers flexibility in their personal workflow and prevents having to commit as a giant changeset. Distributed version control, such as Git, Bazaar, and Mercurial, offers fast [branching](/topics/version-control/what-is-git-workflow/), because there’s no communication with a remote server - everything is done on a local drive.\n\nAre you ready for a quick look at Git, the most popular distributed version control system? [Brendan O’Leary](/company/team/#brendan), senior developer evangelist, explains Git basics to help teams get started in the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/9oDNBuive-g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe biggest challenge to switching to a distributed version control system is the learning curve. 
Teams will be able to ship higher quality code at new speeds using a distributed version control.\n\n## Core benefits of a distributed version control system\n\nA [distributed version control system](/topics/version-control/benefits-distributed-version-control-system/) is like each team member having a second set of hands to catch problems, introduce fast fixes, and execute fast merging with fewer conflicts. Additionally, it makes the collaboration process hyper-efficient, thereby letting DevOps teams work asynchronously. Version control empowers teams to collaborate and streamline software development to resolve pain points and create a centralized location for code.\n\n## Popular distributed version control systems (e.g. Git)\n\nThe three most well-known options are Git, SVN, and Mercurial. The most popular of these options is Git, which is an open-source distributed system that is used for any size software project. \n\nGit offers tons of features and benefits, including:\n\n* Strong support for non-linear development.\n\n* Works with popular protocols/systems including HTTP, FTP, and SSH.\n\n* Offers GIT GUI, which allows for fast re-scan, state change, sign off, commit & push the code quickly with low friction.\n\n* It can handle any size project.\n\n* Can function across platforms.\n\n* Toolkit-based design.\n\n* Rapid and efficient performance.\n\n* Code changes are easily tracked and managed.\n\nWhen choosing a version control system, make sure to evaluate all options to find the best fit for your team.\n\nCover image by [Hans-Peter Gauster](https://unsplash.com/@sloppyperfectionist?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/3y1zF4hIPCg)\n{: .note}\n",[1684,9,696],{"slug":2996,"featured":6,"template":699},"move-to-distributed-vcs","content:en-us:blog:move-to-distributed-vcs.yml","Move To Distributed 
Vcs","en-us/blog/move-to-distributed-vcs.yml","en-us/blog/move-to-distributed-vcs",{"_path":3002,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3003,"content":3009,"config":3015,"_id":3017,"_type":13,"title":3018,"_source":15,"_file":3019,"_stem":3020,"_extension":18},"/en-us/blog/on-calliday-unsucking-your-on-call-experience",{"title":3004,"description":3005,"ogTitle":3004,"ogDescription":3005,"noIndex":6,"ogImage":3006,"ogUrl":3007,"ogSiteName":685,"ogType":686,"canonicalUrls":3007,"schema":3008},"On-Calliday: A guide to unsucking your on-call experience","Being on-call can be rough because you're likely losing sleep, which can impact your personal and professional life. Here are some tips on how to make on-call shifts less painful for your team and company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680447/Blog/Hero%20Images/on-calliday.jpg","https://about.gitlab.com/blog/on-calliday-unsucking-your-on-call-experience","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"On-Calliday: A guide to unsucking your on-call experience\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Folson\"}],\n        \"datePublished\": \"2017-06-14\",\n      }",{"title":3004,"description":3005,"authors":3010,"heroImage":3006,"date":3012,"body":3013,"category":811,"tags":3014},[3011],"Amanda Folson","2017-06-14","\nIn spirit of the rapidly approaching summer-vacation season, here are some tips on how to prevent burnout when scheduling on-call rotations. Although I'm currently a developer advocate, I've been a career developer and worked in DevOps roles, and I'm no stranger to the on-call life.  
Here I'll discuss burnout, the pros and cons of different shift lengths, and how to make on-call rotations a little less painful.\n\n\u003C!-- more -->\n\n## Four phases of burnout\n\nFirst, let's talk about burnout, because this is what we’re trying to prevent.\nEspecially in tech, people may respond to the demands of their job by staying late to get stuff done, or forgoing vacation days because even the prospect of catching up upon return is daunting. It's worth remembering that work can actually kill you, and there's a lot of stigma around this kind of stress, so it's important to talk about.\n\nThese four stages are for employees and employers alike to keep tabs on yourself and your team.\n\n### Caution\nYou feel like you’re not providing value so you try to prove yourself by working more. You might feel down on yourself.\n\n### Warning\nYou start to ignore your own needs in favor of working. Sleep, family, and hobbies become secondary priorities to you. You might panic. You work all the time and sleep like crap.\n\n### Danger\nThis is the point where you need to REALLY start seeking help. Your behavior starts to change at this stage. You might become aggressive or withdraw from serious commitments and social functions, or start engaging in risky behavior. You might be so anxious about all of the work you have to do that you end up not doing anything at all.\n\n### Emergency  \nIf you’re in this zone you need to seek help immediately. In this stage you might feel empty and engage in even riskier behaviors. Many people are depressed at this stage, and it’s not uncommon for people to have suicidal thoughts.\n\n## How can we make on-call shifts better?\n\nTeam members can protect themselves from burnout by making sure everything is in order before their shift. For example, make sure to pay important bills, run errands you’ve been putting off, and do anything else you can to simplify your work week. 
In terms of making your team work better as a whole, here are some additional best practices you can consider enforcing:\n\n### Don't make the pain in vain\nA chance of being woken up in the middle of the night is never going to be amazing. You can do a lot to decrease the likelihood of it. If you have to be woken up, make it worth the pain and make the time count.\n\n### Make the data count\nMany companies rely on their on-call employees being woken up, and the buck stops there. They have basic monitoring set up but don’t do anything with the data. You should be auditing the information collected during on-call shifts: do root cause analyses, talk about issues, and look for patterns. If you notice that something happens at 2am every few days, you can dig in and fix that.\n\n### Find the best tool for the job\nMany tools exist to help you manage complex scheduling and data aggregation. There are plenty of alternatives, so definitely find one that works for you. Every single one of them is designed to tell you when people are getting woken up and what’s waking them up.\n\n### Keep your staff sharp\nRun drills where you knock things over in a controlled environment and practice putting out those fires.\n\n### Learn how to do incident response\nYou can learn a lot from actual firefighters. I learned a lot from 3 guys at Blackrock, who were actual firefighters turned ops guys who go around teaching ops orgs how to handle incidents better. When there’s a fire, there’s an incident commander, who is in charge of directing everyone else. Rank isn't important here; this person does not have to be manager, they should just be responsible for checking in on everyone for status updates. This person also assigns a scribe to take notes if necessary, although it's better to record calls if you can for better learnings later.\n\n### Implement \"you write it, you wear it\"\nIf you do nothing else in this list, do this. 
The people who are writing the code, deploying the infrastructure, or touching the guts should be involved in the on-call rotation somehow. These are the best people to fix issues - they’re the ones that know it inside and out. If you don’t have these people on-call, I’m going to boldly say you’re doing it wrong.\n\n### Set better schedules\nTry to start and end your on-call rotations in the middle of the day to give staff an opportunity to go over any problems or questions they experienced on shift. Starting and ending your shifts mid-week is also ideal, since it avoids many bank holidays. Try never to start or end a shift on a holiday, and if you have to have someone on-call on a holiday, it's important to share the load across the team if you can so that one person isn’t on-call the whole day.\n\n### Make people take vacation (!!!)\nOn a related note, employers should keep track of how many people are taking vacation and when. Force people to actually take vacation if you need to - this will make the team as a whole healthier and better when on their shift.\n\n## Which shift length is right?\n\nThere’s no one-size-fits-all solution to scheduling, but I typically tell people to not do weekly rotations unless they have mature monitoring in place. It’s better to proactively monitor and adjust schedules as needed. Think of schedules as a living calendar that’s flexible and open to improvement, rather than using the “set it and forget it” approach. People are dynamic and their needs change, so your schedule should reflect that. Here are a few examples of common shift length:\n\n### 8 hours\nThis is great for people who are covering a business day. The shift might start when someone comes in and end when they leave - or up to 3 hours after leaving - before another team takes over. Extend by 3 hours after they leave so that work they did during the day has time to settle. 
This length is useful for people who are doing deploys during the day as they’re around to fix issues that arise without anyone else getting paged for it.\n\n### 12 hours\nThis shift length is ideal for people who are covering an overnight. Try \"follow-the-sun\" rotations, which means exactly what you'd expect: Everyone is on-call during their local business hours. Someone starts at 9am, someone starts at 9pm - this still allows for a hand-off and isn’t in the middle of the night.\n\n### 24 hours\nA 24-hour shift is really common and relatively low stress if you have several people on a team. This prevents anyone from having a “rough week” - there's equal opportunity for everyone to have a rough night. The shift is over before you know it.\n\n### 1 week\nThis is typical for small and large teams, and is great if you want to have longer periods of rest between shifts. If you have 4 people, this schedule means each team member is \"off-call\" for 3 weeks at a time. However, having a week long shift feels really long, particularly if stuff is on fire multiple nights. This is the schedule most likely to lead to burnout.\n\nAs you look at your team's summer schedule, I hope this guide helps ameliorate any dread you have about being on-call. Have any questions I didn't address here? 
Comment here or tweet me [@AmbassadorAwsum](https://twitter.com/ambassadorawsum).\n",[9,1241,2744],{"slug":3016,"featured":6,"template":699},"on-calliday-unsucking-your-on-call-experience","content:en-us:blog:on-calliday-unsucking-your-on-call-experience.yml","On Calliday Unsucking Your On Call Experience","en-us/blog/on-calliday-unsucking-your-on-call-experience.yml","en-us/blog/on-calliday-unsucking-your-on-call-experience",{"_path":3022,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3023,"content":3029,"config":3035,"_id":3037,"_type":13,"title":3038,"_source":15,"_file":3039,"_stem":3040,"_extension":18},"/en-us/blog/optimizing-the-value-exchange-a-gentle-introduction",{"title":3024,"description":3025,"ogTitle":3024,"ogDescription":3025,"noIndex":6,"ogImage":3026,"ogUrl":3027,"ogSiteName":685,"ogType":686,"canonicalUrls":3027,"schema":3028},"How to optimize your value exchange system","Part one of a pragmatic, business-driven guide to help teams transition from fixating on output to optimizing the value exchange with their customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672701/Blog/Hero%20Images/post-1-cover.jpg","https://about.gitlab.com/blog/optimizing-the-value-exchange-a-gentle-introduction","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to optimize your value exchange system\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabe Weaver\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":3024,"description":3025,"authors":3030,"heroImage":3026,"date":3031,"body":3032,"category":1239,"tags":3033},[2315],"2019-12-16","*Reading time: 8 minutes, 28 seconds*\n\n## The problem \n\nBuilding software products is **really** hard. Building an enduring company is even harder. 
We are constantly looking to [hire](https://www.christenseninstitute.org/jobs-to-be-done/) solutions to help us do this quickly and with as little risk as possible. Companies hire Agile ([R.I.P](https://pragdave.me/blog/time-to-kill-agile.html)) because they believe it will help them be more adaptable, increase productivity, and accelerate software delivery. They hire DevOps as a natural companion to increase speed, reduce defects, safely deploy code, and improve the resilience of infrastructures. \n\nWhile these solutions are directionally correct, we have a long way to go:\n\n- Software failures cost the U.S. economy over [$1t annually](https://www.cloudcomputing-news.net/news/2017/oct/30/glitch-economy-counting-cost-software-failures/).\n- Only 56% of startups [make it to their fourth year](https://smallbiztrends.com/2019/03/startup-statistics-small-business.html).\n- [~40%](https://newproductsuccess.org/new-product-failure-rates-2013-jpim-30-pp-976-979/) of new products fail.\n- Only [6% of teams](https://www.stateofagile.com/#ufh-i-521251909-13th-annual-state-of-agile-report/473508) report that Agile (long live agility) practices are enabling greater adaptability to changing market conditions.\n\nTo get better, we need to fundamentally shift the way we approach building software products and companies. Through a fictitious story about a company called Acme Co., I'm going to provide a pragmatic, business-driven approach of how we can get there. The first step is to change how we define success. We need to move from **output to outcomes**. \n\n## The basics of the value exchange system\n\nI've worked with a lot of companies over the years. Most of them measure the success of product teams by how much they ship. In [Escaping The Build Trap](https://melissaperri.com/book), Melissa Perri correctly identifies the problem with this and the root cause -- companies misunderstand value. 
As she succinctly describes in her book:\n\n> Instead of associating value with the outcomes they want to create for their businesses and customers, they measure value by the number of things they produce. Let’s go back to the basics to determine what true value is. Fundamentally, companies operate on a value exchange. \n\nShe uses a simple diagram to illustrate the value exchange system:\n\n\u003Cbr>\n![A Simplified Value Exchange System Diagram](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/value-exchange-simple-diagram.png)\n\n\u003Cbr>\nCompanies build products to serve as vehicles for value delivery. When the customer's problems, needs, and wants are fulfilled, they provide value back to the business. Business value is easy to define, as it typically maps to achieving traditional objectives that are universal to all companies:\n\n- **Sustainable Value:** Support the product's core value and create barriers to competition.\n- **Growth:** Grow market share, fulfill more demand, develop new markets, and improve recurring revenue.\n- **Profit:** Support higher prices, improve lifetime value, lower costs, and leverage existing assets.\n\nWhile these are easily measurable, customer value is often more intangible; making it difficult to define and correlate to business objectives. This leads companies to create proxies to represent value that are more straightforward to comprehend and measure -- such as things shipped.\n\nAs we discussed earlier, companies hire solutions like Agile (long live agility) and DevOps because they want to increase the speed and productivity of their value delivery system. The Value Exchange System is a [reinforcing loop](https://thesystemsthinker.com/reinforcing-and-balancing-loops-building-blocks-of-dynamic-systems/), so it naturally follows that if you increase the speed at which you deliver value, you will, therefore, increase the amount of value you capture from your customers. 
Companies are so fixated on optimizing for speed that a whole market of productivity analytics is emerging to track and report it. For example, the hook on the landing page of a prominent tool on the market promises to help you \"measure your team's success\" by surfacing velocity data to see how fast your team is going. \n\nWhile speed and productivity are good things, optimizing for them in a silo will have a minimal overall impact on increasing the positive reinforcement to the system. This is because output does not necessarily result in outcomes. We need to optimize both how we deliver _and_ capture value. To better understand why, let's jump into an all so common dilemma Acme is currently trying to overcome.\n\n## Let's Meet Acme Co.\n\n> “Bounded rationality means that people make quite reasonable decisions based on the information they have. But they don’t have perfect information, especially about more distant parts of the system.” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nAcme is a tech company that provides IoT devices and AI-driven insights to help logistics companies improve the efficiency of their operations. Acme has seen remarkable success over the last several years by leveraging a common growth strategy of steadily increasing investment in R&D, marketing, and selling to capture market share as quickly as possible. While not yet profitable, they believe there is an imminent inflection point where their investment in R&D will begin to pay off, resulting in revenue growth disproportionate to operating expenses. This has been playing out as expected, but the executive team is starting to get concerned given some financial trends over the last few quarters. 
\n\n| | Q1 | Q2 | Q3 | Q4 |\n| --- | --- | --- | --- | --- |\n| Revenue | 36.7 | 44.0 | 48.4 | 50.9 |\n| COGS | 2.2 | 4.0 | 4.8 | 4.6 |\n| **Net Sales** | **34.5** | **40.1** | **43.6** | **46.3** |\n| *Margin* | *94%* | *91%* | *90%* | *91%* |\n|  |  |  |  |  |\n| R&D | 16.5 | 18.3 | 20.3 | 22.7 |\n| Marketing | 4.1 | 5.2 | 6.6 | 7.4 |\n| Sales | 15.0 | 17.8 | 19.9 | 21.2 |\n| G&A | 1.6 | 2.1 | 2.3 | 2.9 |\n| **Total OpEx** | **37.2** | **43.4** | **49.1** | **54.2** |\n| |  |  |  |  |\n| **Net Profit** | **-2.7** | **-3.3** | **-5.5** | **-7.9** |\n| *Profit Margin* | *-7%* | *-8%* | *-11%* | *-16%* |\n\nAcme's board set a -20% acceptable risk threshold for the net profit margin. It believes, that if necessary, it will be able to quickly reduce the deficit through strategic cost-cutting measures. It's clear from the financials that the company is on course to hit the threshold and the executives are scrambling to understand why. They have been easily converting customers from their competitors and are nowhere close to saturating their addressable market. They are investing heavily in delivering new product capabilities and scaling the sales organization to capture the value in return. The executive team decides to create a dedicated cross-functional working group to investigate and solve the problem. \n\n## Practicing kaizen with the improvement kata\n\nThe challenge before the working group is daunting. Acme is an incredibly complex system with hundreds of people and moving parts. They decide to adopt a management processes from the Toyota Product System developed by Taiichi Ohno. 
The group reviews what it means to practice continuous improvement - [kaizen](https://en.wikipedia.org/wiki/Kaizen) - by employing a technique called the Improvement Kata.\n\n![Diagram of the Improvement Kata](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/toyota-kata.png){: .center}\n\nThe first thing the group needs to decide is which direction to focus on first. If the challenge is to increase profits, that could be accomplished in a few different ways - increase the growth rate of revenue or decrease operating expenses. Looking at the financials, revenue growth has declined from an average of 20% per quarter to only 5% in the most recent. Given Acme has a sizeable market left to capture, and decreasing R&D spend at this point could hurt their long term growth targets, the group sets a target condition of getting revenue growth back to 10%. This feels like an obtainable goal within the quarter and will demonstrate forward progress to the executives.\n\nWith a target condition in hand, their next task is to identify the root cause of the decline in revenue growth and conduct experiments to reach their goal. They set out to connect with the sales team to begin their investigation. 
\n\n##  Why is revenue growth declining?\n\nAs the working group sits down with the sales team to review numbers, they use the [5 why's](https://medium.com/productmanagement101/learn-about-the-five-whys-technique-78283d75800f) to try to understand where things are going wrong:\n\n- **Why is revenue growth declining?** The sales team shares that conversion rates from new revenue are holding steady, but their conversion rates from up-sells have fallen dramatically.\n- **Why are up-sells declining?** According to the sales team, Acme is not delivering capabilities fast enough that it had promised during the sales cycle.\n- **Why did the sales team set expectations for capabilities that weren't built yet?** The sales team explained that enterprise customers have complex needs that aren't supported in Acme's core product capabilities yet. To keep up with sales quotas, they found that walking prospects through Acme's product roadmap usually gets the deal over the line. Based on the roadmap and the planned release dates, they had not seen this as a risk because things were expected to be delivered within the customer's acceptable time ranges. \n- **Why weren't things delivered within the customer's acceptable time range?** In further discussions, the sales team reveals that they feel R&D is delivering new features at an increasingly slower rate quarter over quarter.\n- **Why is R&D slowing down on value delivery?** At first glance, R&D's output metrics are consistent month-over-month relative to headcount. While the sales team helped the working group see that lost opportunities in up-sells were driving lower growth rates; it didn't make sense. The group decides to head over to R&D to better understand the value delivery system.\n\n## The importance of measuring the value delivery stream\n\nTo prepare for collaborating with the R&D team, the working group spends a few minutes reviewing the team's productivity metrics. 
To the surprise of the group, R&D only has one primary metric everyone on the team tracks consistently - the count of work items delivered. As the working group starts discussions with R&D, they explain that the executives set output objectives as a means of measuring the team's success. The rationale for this was due to the way the product is bundled and priced. The categories all roll up into one pricing model and it was difficult for the finance team to figure out a way to attribute revenue to the various categories. They had ultimately decided that measuring productivity was the next best thing.    \n\nSo if work item output is the goal and R&D's output has been consistent quarter over quarter, why was the sales team convinced R&D was slowing down on value delivery? To understand this, the working group needed to better understand the flow of items through the value delivery process within the Value Exchange System. To do this, they generated a simplified value stream map to visualize the stages a work item goes through as it is converted from requirements to a production feature.  \n\n\u003Cbr>\n\n![Acme's Value Stream Map](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d1.png){: .center}\n\n_Acme's Value Stream Map. Cycle Time = Time in queue + active time + time waiting once started_\n\nThe value stream map reveals that the total lead time for a work item is 2,132 hours (88 days). This is an astonishing revelation to the working group; especially since they didn't include the time a work item spends waiting between when a customer requests a feature and the team starts the planning process. Even though the R&D team delivered ~3,100 work items last month, it took well over three months to satisfy customer requests. The working group needs to collect more data, but they know they are on the right track. Before synthesizing a hypothesis, they finish collecting additional historical metrics to confirm their suspicions. 
\n\n| | Q1 | Q2 | Q3 | Q4 |\n| --- | --- | --- | --- | --- |\n| Items Delivered | 2,275 | 2,524 | 2,800 | 3,131 |\n| Lead Time | 31 | 52 | 69 | 88 |\n| Cost Per Item Delivered | 7,250 | 7,250 | 7,250 | 7,250 |\n\nBased on the data, the working group notices the strong correlation between the increase in lead time and the decrease in up-sell revenue growth. Given their target condition for the upcoming quarter, they create the following hypothesis: \n\n> Decreasing the lead time by ~35% will enable customer requests to be completed 18 days earlier, resulting in a 5% increase in revenue growth\n\nWith a falsifiable hypothesis in hand, they shift their attention to figuring out the best approach for running an experiment. \n\nContinue reading:\n\n- Part 1: A Gentle(ish) Introduction\n- Part 2 (Next): [Reduce Waste To Increase Flow](/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow/\n)\n- Part 3: [The Compounding Value Of Shorter Feedback Loops](/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops/)\n\n ***Giving credit where it is due:** In [Escaping The Build Trap](https://www.oreilly.com/library/view/escaping-the-build/9781491973783/ch01.html), Melissa Perri discusses the Value Exchange System at length and provides unparalleled wisdom for modern-day product managers. The Improvement Kata, Kanban, and Toyota Production System would not exist today if it weren't for Taiichi Ohno. 
His work has been foundational to many other systems and processes that have evolved over the years.* \n\nCover Photo by Cristina Gottardi on [Unsplash](https://unsplash.com/photos/boxam4k4rQw)\n",[744,3034,9],"startups",{"slug":3036,"featured":6,"template":699},"optimizing-the-value-exchange-a-gentle-introduction","content:en-us:blog:optimizing-the-value-exchange-a-gentle-introduction.yml","Optimizing The Value Exchange A Gentle Introduction","en-us/blog/optimizing-the-value-exchange-a-gentle-introduction.yml","en-us/blog/optimizing-the-value-exchange-a-gentle-introduction",{"_path":3042,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3043,"content":3049,"config":3053,"_id":3055,"_type":13,"title":3056,"_source":15,"_file":3057,"_stem":3058,"_extension":18},"/en-us/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow",{"title":3044,"description":3045,"ogTitle":3044,"ogDescription":3045,"noIndex":6,"ogImage":3046,"ogUrl":3047,"ogSiteName":685,"ogType":686,"canonicalUrls":3047,"schema":3048},"Optimizing The Value Exchange: Reduce Waste To Increase Flow","Part two of a pragmatic, business-driven guide to help teams transition from fixating on output to optimizing the value exchange with their customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672713/Blog/Hero%20Images/post-2-cover.jpg","https://about.gitlab.com/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Optimizing The Value Exchange: Reduce Waste To Increase Flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabe Weaver\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":3044,"description":3045,"authors":3050,"heroImage":3046,"date":3031,"body":3051,"category":1239,"tags":3052},[2315],"If you're new to the series, check out Part 1: [A Gentle(ish) 
Introduction](/blog/optimizing-the-value-exchange-a-gentle-introduction/)\n\n_Reading time: 13 minutes, 32 seconds_\n\n## Learning To Identify Waste\n\n> “Systems behavior is particularly sensitive to the goals of feedback loops. If the goals - the indicators of satisfaction of the rules - are defined inaccurately or incompletely, the system may obediently work to produce a result that is not really intended or wanted.” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nSince following the Improvement Kata has gotten the group this far, they decide to use another concept from the Toyota Production System (TPS) -- waste reduction. To increase flow through the value delivery system, they start by looking at steps in the workflow that do not add any intrinsic value. TPS categorizes waste into 8 distinct types:\n\n- **Talent:** Underutilizing people's talents, skills, and knowledge.\n- **Inventory:** Excess products and materials not being processed.\n- **Motion:** Unnecessary movements by people.\n- **Waiting:** Wasted time waiting for the next step in a process. \n- **Transportation:** Unnecessary movements of products and materials.\n- **Defects:** Efforts caused by rework, scrap, and incorrect information.\n- **Overproduction:** Production that is more than needed or before it is needed.\n- **Overprocessing:** More work or higher quality than is required by the customer. \n\nWith this in mind, the working group pulls up their value stream map and immediately identifies a common theme across stages in the flow - some stages have a high amount of time spent waiting. The group quickly adds this up to understand the impact.\n\n![Acme's Value Stream Map - Time waiting](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d2.png){: .center}\n\nIn total, work items spend ~85 days waiting between queues or waiting for additional work needed from other parts of the R&D team. 
The group is speechless as they realize that it only takes around 3 days of actual labor to move a work item through the value delivery stream. They decide this is the right place to start and prioritize their efforts based on which cycles have the highest amount of time in waiting:\n\n1. **Build** - 863 hours in a queue, 613 hours once a work item has started.\n2. **Plan** - 362 hours once a work item has started.\n3. **Review** - 27 hours in a queue, 95 hours once a work item has started.\n4. **Canary** - 73 hours in a queue.\n5. **Prod** - 22 hours in a queue.\n6. **Staging** - 1 hour in a queue.\n\nThe group is excited about their discovery and is eager to share it with the engineering team. When they sit down and show them the numbers, the engineers are surprised. The conversation continues with the engineers talking through what it looks like for a single engineer in a typical week. The working group captures it in a diagram and confirms with the engineers that it is representative of the conversation.\n\n![Sequence Diagram](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/sequence-diagram.png){: .center}\n\nThe group asks the engineers how they feel about this workflow. They're generally supportive as they don't see an alternative to this despite it being extremely heavy on the context switching. The working group still needs to figure out how to reduce the waiting time for work items in this stage and eliminating unnecessary waiting is a logical place to start. They present their target condition and ask the engineers for suggestions. The engineers feel strongly that the code review process is imperative to maintain code quality. One engineer suggests the team could try pair programming. He heard that it can increase code quality. It would also remove the Review stage entirely as two people contribute to the same work item until it is fully completed.\n\nThey debated back and forth. 
Many felt that it would slow everything down even more because they would only be able to do half the work they had been doing up to that point. The engineer who brought it up in the first place pointed to an [experiment](https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/20030012934.pdf) where NASA engineers completed two like-size projects - one as they normally would and one strictly following [Extreme Programming](http://www.extremeprogramming.org/); which includes a mix of practices like pair programming, collective code ownership, TDD, and small iterations. He presented the team with the following data:\n\n| | Non-XP Project | XP Project |\n| --- | --- | --- |\n| LOC/Hour | 24 | 27 |\n| Util/test LOC | 0 | 1,135 |\n| Prod LOC | 2,144 | 912 |\n| Total LOC | 2,144 | 2,545 |\n\nHe explained that part of the reason why the team was able to produce less production code at a faster overall rate was due to the pair pushing each other not to cut corners by focusing on quality; ruthlessly refactoring code as they went. He also discussed the concept of the driver and navigator roles, and how that led to better architecture decisions the first time around. The engineers were skeptical but were willing to give it a try. Together, everyone agreed that the engineers would run an experiment to try pair programming for the upcoming quarter. They also agreed to only work on one item from start to merged before picking up the next item.\n\n## Increased flow does not equal increased value\n\nAt the end of the quarter, the working group got together to review an updated value stream map. \n\n![Acme's Value Stream Map - Updated](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d3.png){: .center}\n\nThe work in progress limits and pair programming dramatically improved flow through the value stream. Lead time dropped to 893 hours or 37 days. A 57% reduction was much better than the working group had anticipated. 
The books for the quarter weren't closed, but the working group knew it was going to be a huge win. They eagerly shared the results with the executive team.\n\nAs the quarterly financials were released, the group got a call from the CFO. He was disappointed. He had expected to not only achieve the 5% incremental revenue growth but do much better given the results of the experiment. The working group was stunned. Revenue growth only increased from 5% to 9%. Despite missing the mark, the executives agreed to continue sponsoring the working group as it did produce tangible results and business outcomes. \n\n**Financials**\n\n| | Previous | Current | \n| --- | --- | --- |\n| Revenue | 50.9 | 55.4 |\n| COGS | 4.6 | 5.0 |\n| **Net Sales** | **46.3** | **50.1** |\n| *Margin* | *91%* | *91%* |\n|  |  |  |\n| R&D | 22.7 | 23.1 | \n| Marketing | 7.4 | 8 |\n| Sales | 21.2 | 22.9 |\n| G&A | 2.9 | 3.8 |\n| **Total OpEx** | **54.2** | **57.8** |\n| |  |  |\n| **Net Profit** | **-7.9** | **-7.3** |\n| *Profit Margin* | *-16%* | *-13%* |\n\n\u003Cbr>\n**Value Delivery Economics**\n\n| | Previous | Current |\n| --- | --- | --- | \n| Items Delivered | 3,131 | 5,002 |\n| Lead Time | 88 | 37 |\n| Cost Per Item Delivered | 7,250 | 4,617 |\n\nThey combed through the financials and productivity metrics to figure out what had happened. The value delivery system had become much more efficient. The cost per item delivered decreased. What went wrong? They circled back to where they started with the sales team to ask for an update. The sales team reported that they still felt like speed on delivering customer requests was moving slowly. It had gotten a little bit better in a few areas, but overall it was having a nominal impact on fixing the problem of declining up-sell rates. Determined to get to the bottom of things, the working group decides to dig a bit deeper into the kind of work flowing through the R&D value stream. 
\n\nThis is what they discovered upon further investigation into the breakdown of types of work items R&D was delivering:\n\n| | Q2 | Q3 | Q4 | Q1 |\n| --- | --- | --- | --- | --- |\n| Feature | 63% | 46% | 38% | 44% |\n| Defect | 13% | 22% | 29% | 36% |\n| Technical Debt | 24% | 32% | 33% | 20% |\n\nA light bulb clicked. No wonder the sales team felt like things were moving slowly. The amount of new value being delivered to customers was near an all-time low. It was certainly better than the previous quarter thanks to the improvement in lead time, but this trend was concerning to the group. \n\nAs they followed the same path from the first time around, they connected with the R&D team to ask questions. After presenting the work item breakdown by type, R&D was not surprised at all. They look at this ratio quite often. When the group asked them why they hadn't surfaced this earlier, they explained that the pace of constant feature prioritization over the last several years gave the team little extra time to refactor parts of the codebase that were critical to the value delivery system. \n\nIn continuing to listen intently, the team conveyed that they had brought this up on several occasions. Upper management insisted that the team focus on creating new value to fuel the growth targets set by the company leaders. The team also explained that because the company has SLAs with customers, they have had to prioritize severe defects over new features; leaving little time for anything else. \n\nA product manager stepped forward and openly confessed that he regularly makes pacts with his engineering manager to repay technical debt in short order if they would do whatever it took to get the feature shipped. He said they rarely followed through to go back and clean up technical debt. Everyone in the meeting confirmed that this was an acceptable practice. The working group started to see the long term consequences that are so easily masked by strong financial growth. 
As the R&D team cut corners to ship features, technical debt mounted. Unable to take the time to address it, defect count had started increasing at a consistent rate. The R&D team now had to split focus across three different areas - trying to keep the lights on, delivering new value, and trying to find time to repay the technical debt. \n\n![Technical Debt Diagram](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/tech-debt.jpg){: .center}\n\nDespite this revelation, the working group was still responsible for delivering on their target condition. Now it was on them to convince leadership to take technical debt seriously. As they mulled over ways to present this, they asked for a report of all the inventory that was waiting to be produced. For this to resonate with the executives, they needed to communicate it in a way that provided justifiable business reasons to throttle back on feature development. They calculated the cost per quarter that the company was spending on work that did not add intrinsic customer value or positively reinforce the Value Exchange System.\n\n| | Type | Quarterly Cost |\n| --- | --- | --- |\n| Features | 38% | 8.8 | \n| Defects | 22% | 5.1 | \n| Tech Debt | 40% | 9.2 | \n\nThe company was investing $14.3 million in activities that did not have any tangible impact on driving revenue growth. This was something the leadership team could not ignore. As they presented their case, the executives were concerned that slowing down on feature development in any way would adversely impact revenue growth. The working group talked around this by helping them understand that slowing down for a little bit right now would enable them to accelerate much faster as they would be able to more than double their investment in value-adding features once defects and technical debt were in check. The leadership agreed to the working group's proposal. 
\n\nOver the next few quarters, R&D invested heavily in paying down technical debt and reducing defects. They also made optimizations to the value stream and were able to replace both the staging and canary environments by deploying directly to production behind feature flags. Product managers were able to test things out in production and as soon as a feature was ready for release, they clicked a button and it would become incrementally available to everyone. New defects were at an all-time low and the team was confident all of the efforts would produce huge dividends. \n\nThe working group double-checked the value stream in anticipation of reviewing the quarterly financials that contained the first full quarter where the team spent 80% of their time on value-adding features. \n\n![Acme's Value Stream Map - 80% features](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d4.png){: .center}\n\n**Value Delivery Economics**\n\n| | W/ Tech Debt | 80% Features |\n| --- | --- | --- | \n| Items Delivered | 5,002 | 11,467 |\n| Lead Time | 37 | 11.9 |\n| Cost Per Item Delivered | 4,617 | 2,014 |\n\nThey were pleased with their value stream. It was fine-tuned, waste-free, and incredibly efficient.  Lead times were hovering around 286 hours (11.9 days). The company had continued to invest in scaling the R&D team and they were able to churn out an astonishing 11,467 work items. But then came the financials. 
\n\n| | W/ Tech Debt | Paying It Down | 80% Features | \n| --- | --- | --- | --- |\n| Revenue | 55.4 | 57.7 | 68.04 |\n| COGS | 5.0 | 5.2 | 6.1 |\n| **Net Sales** | **50.5** | **52.5** | **61.9** |\n| *Margin* | *91%* | *91%* | *91%* |\n|  |  |  | |\n| R&D | 23.1 | 25.3 | 27.2 | \n| Marketing | 8 | 8.9 | 9.6 |\n| Sales | 22.9 | 24 | 25.6 |\n| G&A | 3.8 | 5.1 | 6.4 |\n| **Total OpEx** | **57.8** | **63.3** | **68.8** |\n| |  |  | |\n| **Net Profit** | **-7.3** | **-10.8** | **-6.9** |\n| *Profit Margin* | *-13%* | *-19%* | *-10%* |\n\nThey only improved revenue growth rates by 9 points, from 9% to 18%. The working group was surprised the CFO hadn't called. When they checked their calendars, a week-long onsite meeting had been scheduled with heads from every division at the company. It wasn't all too surprising. They continued to fail to meet the mark, but could not figure out why. The cost per work item had been reduced by 72%. Investment in value-adding work items was at an unprecedented all-time high. The value delivery stream was a well-oiled machine, but growth rates weren't responding.\n\nAs the working group prepared to meet their fate, they decided to conduct a retrospective to try to gain an understanding of where they went wrong. During the conversation, one of the group members reminded the team of the earlier conversation with R&D discussing success metrics; where R&D recounted that Acme had never really figured out how to more formally tie business outcomes to the output they were creating. The group didn't know the answer, but they agreed to spend some time before the on-site to explore the topic.\n\nThe group talked to peers at other companies and read as many books as they could get their hands on. 
One of them was [The Goal](https://www.amazon.com/Goal-Process-Ongoing-Improvement/dp/0884271951/ref=asc_df_0884271951/?tag=hyprod-20&linkCode=df0&hvadid=312175933381&hvpos=1o1&hvnetw=g&hvrand=10925659369993672127&hvpone=&hvptwo=&hvqmt=&hvdev=c&hvdvcmdl=&hvlocint=&hvlocphy=9031326&hvtargid=pla-434522630098&psc=1&tag=&ref=&adgrpid=60258871817&hvpone=&hvptwo=&hvadid=312175933381&hvpos=1o1&hvnetw=g&hvrand=10925659369993672127&hvqmt=&hvdev=c&hvdvcmdl=&hvlocint=&hvlocphy=9031326&hvtargid=pla-434522630098) by Eliyahu Goldratt, a story much similar to the journey that the working group had been on. As the group discussed the book, they finally realized the fatal flaw they had made -- they had spent all of their time fixated on trying to achieve the local maximum of R&D instead of aligning the feedback loops between the value delivery system and the value capture system. \n\nThe most effective way to do this is by focusing on the efficiency of the Value Exchange System as a whole. As the group learned, this can be accomplished by measuring the Value Exchange System's throughput. It's important to note that this kind of throughput should not be confused with the definition in Kanban (work output), but rather the rate at which a system generates revenue through sales. The group had achieved a new level of alignment...almost. To start measuring this, they first needed to learn more about throughput accounting. \n\n## Throughput Accounting 101\n\n> “The most marvelous characteristic of some complex systems is their ability to learn, diversify, complexify, and evolve…Like resilience, self-organization is often sacrificed for purposes of short-term productivity and stability. 
Productivity and stability are the usual excuses for turning creative human beings into mechanical adjuncts to production processes.” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nIn stark contrast to cost-based accounting, [throughput accounting](https://en.wikipedia.org/wiki/Throughput_accounting) prioritizes value creation over cost-cutting. It consists of [three primary metrics](https://www.amazon.com/Scaling-Lean-Mastering-Metrics-Startup/dp/1101980524):\n\n- **Throughput** is the rate at which monetizable value is generated from a company's customers over their lifetime minus any variable costs such as the cost of customer acquisition. \n- **Inventory** represents all of the money a company has invested into the Value Exchange System including current product capabilities, unfinished goods (requirements, features, job stories, etc.), equipment, and infrastructure. \n- **Operating Expenses** are the costs expended turning inventory into throughput. This includes research and development, sales, and general expenses. \n\nThese metrics are then built up to determine the financial health of a given Value Exchange System:\n\n- **Throughput (T)** = Lifetime Value (LTV) - Cost of Customer Acquisition (COCA)\n- **Profit (P)** = Total Throughput (T) - Operating Expenses (OE)\n- **Return On Investment (ROI)** = Profit (P) / Inventory (I)\n\nHere is where the differences between cost-based accounting and throughput accounting start to manifest. As Ash Maurya, points out in Scaling Lean:\n\n> Cost-based accounting places more emphasis on the right-hand side of the profit equation — decrease operating expenses. It focuses on scalable efficiency and squeezing out costs — especially labor costs. 
This typically manifests itself as policies requiring detailed weekly timesheets broken down by task, as well as downsizing, outsourcing, and other cost-reducing measures.\n\n> It is much more powerful to try to affect the left-hand side of the profit equation — increase throughput — because cost-cutting has a theoretical limit of zero. Increasing throughput has no theoretical upper limit. You can find ways to add more value to an existing product, build more add-on products, or expand the market — provided, of course, that these efforts lead to a positive return on investment.\n\nWhat's also important about looking at the Value Exchange System through the lens of throughput accounting is that it provides the preferred order for where to focus optimization efforts:\n\n1. Throughput\n1. Decreasing inventory\n1. Reducing operating expenses\n\n## The Universal Goal\n\nAs Eliyahu Goldratt, creator of throughput accounting and author of [The Goal](https://en.wikipedia.org/wiki/The_Goal_(novel)), frames it:\n\n> The universal goal of every business is to increase throughput while minimizing inventory and operating expenses provided doing that doesn't degrade throughput\n\nThe working group was starting to think differently. Throughput accounting provides a simple, but powerful decision support framework. Every target condition should map to one of three desired outcomes in the Value Exchange System - increase throughput (T), reduce investment (I), or reduce operating expenses (OE). It's also important to point out that throughput accounting does not use the balance sheet to [stash deferred expenses](https://www.scienceofbusiness.com/throughput-accounting/) to be realized later. This is a practice that many companies use to manipulate the income statement depending on the kind of story they want to tell. This ultimately masks the true health of the Value Exchange System. 
\n\nWhile excited about what they are learning, the working group was still a bit stumped about how to implement it within Acme in a way that would help drive improved revenue growth and profitability. They weren't able to formalize a proposal in time for the onsite, but they were now equipped with some better data points around why they had failed and planned to share the news with the executive group.\n\nContinue reading:\n\n- Part 1: [A Gentle(ish) Introduction](/blog/optimizing-the-value-exchange-a-gentle-introduction/)\n- Part 2: Reduce Waste To Increase Flow\n- Part 3 (Next): [The Compounding Value Of Shorter Feedback Loops](/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops/)\n\n***Giving credit where it is due:** Throughput Accounting and the Theory Of Constraints were originally pioneered by Eliyahu Goldratt. Introduced in his book, [The Goal](https://en.wikipedia.org/wiki/The_Goal_(novel)), these concepts have revolutionized the way businesses operate.*",[744,3034,9],{"slug":3054,"featured":6,"template":699},"optimizing-the-value-exchange-reduce-waste-to-increase-flow","content:en-us:blog:optimizing-the-value-exchange-reduce-waste-to-increase-flow.yml","Optimizing The Value Exchange Reduce Waste To Increase Flow","en-us/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow.yml","en-us/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow",{"_path":3060,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3061,"content":3067,"config":3072,"_id":3074,"_type":13,"title":3075,"_source":15,"_file":3076,"_stem":3077,"_extension":18},"/en-us/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops",{"title":3062,"description":3063,"ogTitle":3062,"ogDescription":3063,"noIndex":6,"ogImage":3064,"ogUrl":3065,"ogSiteName":685,"ogType":686,"canonicalUrls":3065,"schema":3066},"Shorter feedback loops compound value in development","Part three of a pragmatic, business-driven guide to help 
teams transition from fixating on output to optimizing the value exchange with their customers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672726/Blog/Hero%20Images/post-3-cover.jpg","https://about.gitlab.com/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Optimizing The Value Exchange: The Compounding Value Of Shorter Feedback Loops\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gabe Weaver\"}],\n        \"datePublished\": \"2019-12-16\",\n      }",{"title":3068,"description":3063,"authors":3069,"heroImage":3064,"date":3031,"body":3070,"category":1239,"tags":3071},"Optimizing The Value Exchange: The Compounding Value Of Shorter Feedback Loops",[2315],"If you're new to the series, check out Part 1: [A Gentle(ish) Introduction](/blog/optimizing-the-value-exchange-a-gentle-introduction/)\n\n*Reading time: 12 minutes, 8 seconds*\n\n## The Problem Of The Tree Swing\n\n> “The most powerful ways to influence the behavior of a system is through its purpose or goal. That’s because the goal is the direction-setter of the system, the definer of discrepancies that require action, the indicator of compliance, failure, or success toward which balancing feedback loops work.” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nThe working group sat down with the leadership and department heads; things were tense. The CEO announced some major news -- the board planned to start cutting R&D budget in an attempt to drive profitability. A hiring freeze would be put into effect if profitability wasn't achieved within the next fiscal year. The board would also be considering a massive round of layoffs as an extreme measure. Everyone in the room understood the stark reality of the situation. 
There was a fundamental problem in how the company was operating.  \n\nAs discussions progressed, the team reviewed the dozens of major initiatives that had shipped over the last several quarters. As they went through these one-by-one, it was not immediately clear how they impacted business outcomes. Sales said that they were all the things customers requested. Product said they talked to the customers and had a validation process for work items during the planning stage. Someone from the working group asked a focusing question -- how does Acme understand how a product capability is used once it has been delivered to a customer? The roadmap was supposedly organized by needs that customers had expressed, but nothing was linking them to quantifiable changes in customer behavior that would result in specific business outcomes.\n\nProduct said they measured certain kinds of activities, but it didn't help them get a clear sense of whether something was working or not. Sales said that account managers regularly checked in with customers and kept track of the feedback. Customer support mentioned they share tickets with product from time-to-time. To grasp where the root issues were in the product, the team decided to call customers that requested features but hadn't fully adopted them. After dozens of interviews and ethnographic observations, everyone started to see a common pattern emerge.\n\n![Insert Tree Diagram](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/tree-swing.jpeg){: .center}\n\nAcme wasn't building what customers needed. How had this happened? Each team interpreted things differently; each with a slight bias based on their functional area. They were never fully aligned. Even worse, they didn't understand what their customers needed to move their own businesses forward. They just built what they thought customers wanted. This is the essence of the tree swing problem. 
\n\nThe working group shared their revelation about optimizing for the wrong thing within R&D and suggested reviewing how success was being measured across the company. Each department pulled up their objectives and key results. Nearly all of them were aimed at each department seeking local maximum. Sales had their quotas to meet. R&D had specific output targets. Customer support was focused on triaging and closing tickets as quickly as possible. The company had grown rapidly, but that just amplified the underlying problems. The signal had been completely lost in the noise.\n\nAs the systemic problems became clear, the working group presented the concept of throughput accounting and how it could help align the organization around a single measure of success. The other team members were skeptical. Nothing else was working and the worst-case scenario was already upon them. Acme finally agreed to give it a try. They spent the next several days discussing what the transition would look like and the mechanics of how it could be measured. They also tackled ways to create tighter alignment across functional areas of the company. Together, they came up with a list of core things they would implement - all to shift the focus from measuring output to measuring outcomes.\n\n## Adopt throughput accounting over cost-based accounting\n\nThe first thing they tackled was converting their traditional cost-based accounting model to the throughput accounting model. 
They still maintained the traditional models for public reporting, but internally, they would make decisions based upon ROI as defined in throughput accounting.\n\nAcme's original cost-based approach for the previous quarter:\n\n| Income Statement | Previous Period |\n| --- | --- |\n| Revenue | 68.0 |\n| COGS | 6.1 |\n| **Net Sales** | **61.9** |\n| _Margin_ | _91%_ |\n|  |  |\n| R&D | 27.2 |\n| Marketing | 9.6 | \n| Sales | 25.6 | \n| G&A | 6.4 |\n| **Total OpEx** | **68.8** |\n| |  |\n| **Net Profit** | **-6.9** |\n| _Profit Margin_ | _-10%_ |\n\nAcme's original balance sheet for the previous quarter (simplified of course):\n\n| Balance Statement | Previous Period |\n| --- | --- |\n| Cash | 201.2 |\n| Accounts Receivable | 3.1 |\n| Goodwill (Product/Intangibles Last 2 Qs) | 52.4 (25.2 + 27.2) |\n| | |\n| **Total Assets** | **284.7** |\n|  |  |\n| Current Liabilities | 16.9 |\n| Long Term Liabilities | 21.5 | \n| **Total Liabilities** | **38.4** | \n|  | |\n| Retained Earnings | -6.9 |\n| Shareholder's Equity | 253.2 |\n| |  |\n| **Total Liabilities & Equity** | **284.7** |\n\nThe working group explained how this approach artificially rewarded the company by converting expenses in R&D to an intangible goodwill asset. There is no feedback on how that asset generates and captures value -- throughput. It also came to light that the financial team was deferring certain R&D expenses as a way to bolster performance on the income statement in the short term. They had planned to realize the expenses when Acme hit the inflection point as a tax mitigation strategy, but that never happened. 
\n\nOnce they were finished converting their financials to throughput accounting, they were overwhelmed by the reality of their situation.\n\n| TA | Previous Period |\n| --- | --- |\n| Revenue | 68.0 |\n| Total Variable Costs (COCA) | 13.6 |\n| **Throughput** | **54.4** |\n| _Efficiency_ | _80%_ |\n|  |  |\n| R&D | 34.2 |\n| Marketing | 2.1 | \n| Sales | 25.6 | \n| G&A | 6.4 |\n| **Total OpEx** | **68.3** |\n| | |\n| **Profit** | **-13.8** |\n| | |\n| Inventory: Goodwill (previous quarter R&D) | 25.2 |\n| Inventory: 6 Months of Requirements | 120,868 |\n| **Total Inventory** | **120,893.2** |\n| | |\n| **Return On Investment** |  **-.01%** = (-13.8/120,893.2) |\n\nSince throughput accounting (TA) defines inventory as completed and uncompleted assets within the Value Exchange System, Acme's ROI was tanked by all of the work waiting to be completed. The powerful thing that this helped Acme understand was that creating more inventory drains the overall return on investment. It also learned that by treating some of the variable costs related to COCA as an operating expense, they missed critical opportunities for increasing the efficiency of marketing operations.\n\nLooking at the financials through this perspective, Acme struggled to determine where to focus their efforts. The ongoing conversation about this exposed another critical flaw in how Acme had been approaching the Value Exchange System. Due to the speed at which they had scaled, 60,000+ work items piled up in their backlog. They struggled to understand their potential impact and effectively map them to customer and business outcomes. A product manager shared how they had successfully used the RICE model when they were smaller, but it had become highly inaccurate at scale given the quality of the input data and variability across product teams. She continued to explain that often, the loudest voice in the room would ultimately influence when things were worked on. 
In listening to her describe the flow of information between sales, customer support, and R&D, it was clear that everyone was working hard, but with such a high volume of communications, it was nearly impossible to take into account all the variables across the disparate contexts when prioritizing the roadmap.\n\nA data scientist overheard the group talking about their challenges and interjected with a suggestion -- Acme was already using machine learning and advanced statistics in their products. Why not use the same approach to optimizing their own Value Exchange System?\n\n## The first iteration towards intelligent decision support\n\nThe data scientist explained that by using pre-existing data from sources across their toolchain, it was possible to build a decision support framework using [natural language processing (NLP)](https://medium.com/analytics-vidhya/automated-keyword-extraction-from-articles-using-nlp-bfd864f41b34) to extract topics and sentiment from text-based data across all of the core tools that Acme used to sell, deliver, and support their products. Teams would not have to adopt new tools or processes, and there would be no disruption to existing business functions. Everyone put their heads together and came up with the scope for the first MVC of their intelligent decision support framework.\n\n![Decision Support Framework - Iteration 1](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d5.png){: .center}\n\nOnce they achieved this level of visibility, they used the topics to create alignment across teams. Additionally, instead of continuing forward using traditional project-based Gantt charts for communicating progress on timelines and deliverables, they adopted an Opportunity Tree map that better reflected the relationship between business outcomes and solving customer needs. Based on the number of customers and the sentiment of a given topic, Acme prioritized where to focus their efforts to yield the biggest impact. 
Looking through all of a topic's related texts across their various communication channels within a single interface, they were able to more efficiently and effectively synthesize opportunities and connect them to outcomes.   \n\n![Acme's Opportunity Tree](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d6.png){: .center}\n\nTo ensure they were building what the customers needed and that the solutions captured the opportunity, they started using a [continuous innovation](https://leanstack.com/library/categories/fundamentals/courses/what_is_continuous_innovation/lessons/fundamental_shift) framework to systemically validate their assumptions and solutions through [lean sprints](https://blog.leanstack.com/the-lean-sprint-bc3f9f8caafd). This did not require the introduction of a new tool, but rather a change in mindset and process among the teams.\n\n![The Lean Sprint Diagram from Scaling Lean by Ash Maurya](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/lean-sprints.png){: .center} \n\nEach sprint established a target condition, hypothesis, experiment, and measurable outcome to validate or invalidate the hypothesis. This new mental model and approach to lean product development helped Acme reduce risk and establish a repeatable cadence that accomplished the following:\n\n1. **Goal** — Break a big vision into smaller time-boxed goals.\n2. **Orient** — Align the team around problems versus solutions.\n3. **Leverage** — Source/rank a wide diversity of possible solutions or strategies for achieving the goal.\n4. **Experiment** — Test these strategies additively using experiments.\n5. **Analyze** — Reevaluate learning against the goal.\n6. **Next Action** — Decide what’s next. 
\n\n![The Lean Cycle Diagram from Scaling Lean by Ash Maurya](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/lean-cycles.png){: .center} \n\nIt wasn't just product teams that adopted this process -- marketing, sales, and customer support all learned how to leverage this process to drive throughput. Before any initiative was funded, the team had to justify the effort by stating a quantifiable hypothesis that explained how it would increase throughput (T), decrease inventory (I), or reduce operating expenses (OE). By being disciplined, lean, manually improving processes, and having a rudimentary decision support framework powered by their existing data, they started to see enormous gains in their throughput. Acme was just starting to scratch the surface of the possibilities for optimizing their Value Exchange System. \n\n## The Importance Of A Timely Decision Support Framework\n\n> “You can make a system work better with surprising ease if you can give it more timely, more accurate, more complete information” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nAcme continued to invest in enhancing their intelligent decision support tool. Their ultimate goal was to model a new value stream that incorporated both the value delivery system and the value capture system. In their next iteration, they added new dimensions and types of features that were extracted from their various operational systems. They also connected their behavior analytics collection service to more effectively map how customer behavior drove specific outcomes and experiments. 
\n\n![Decision Support Tool - Iteration 2](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d7.png){: .center}\n\nBy pulling in financial data from their CRM, they derived the potential value for every single opportunity and how each mapped to the key business drivers in the Value Exchange System -- acquisition, activation, adoption, up-sell, and retention. Additionally, they automatically calculated the RICE score across all work items in a consistent manner. As a result, they removed over 99% of their inventory that did not map to valuable opportunities and adopted a \"just in time\" approach for requirements definition, drastically reducing inventory and improving ROI.\n\nAcme went a step further and augmented its Opportunity Tree used for strategic planning with a dynamic value stream map to report on the health of the Value Exchange System. Since the software code and features were integrated into the decision support framework, they consolidated all of their analytics, logging, tracing, and reporting tools that required complex queries to surface meaningful insights with a simple visual diagram of the critical paths customers took through their product. This effectively tied the Opportunity Tree to their actual product in a way that was easy for anyone at Acme to understand. It also surfaced low hanging fruit that enabled Acme to increase the value capture rate of the assets they already had.\n\n![Acme's New Value Stream Map](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/d8.png){: .center}\n\nTo put their journey succinctly -- Acme transitioned from sensing and responding to predicting and acting. They did not become sales-led, engineering-centric, or product-first. They became throughput driven. They still have a long way to go before becoming ROI positive due to the inefficiencies of the past, but the progress thus far indicates an upward trajectory. 
Because of their successful transition to profitability, the board pushed Acme to continue scaling R&D and operating expenses. The CEO politely declined; showing the board their intelligent decision support tool that highlighted hundreds of millions of dollars in potential revenue just by improving what was already there. He explained that more people won't solve the problem or help them go faster. Better decisions will. \n\n![Insert Sense and Respond](https://about.gitlab.com/images/blogimages/optimizing-the-value-exchange/predictive-analytics.png){: .center} \n\nThe biggest challenge they face moving forward is staying disciplined and not falling into old habits -- something most companies are notorious for doing. It's one of the primary reasons why there are very few truly great companies out there today. Luckily, the investment in making better decisions through shorter, more effective feedback loops will let them know the day that starts happening. \n\n\u003Cbr>\n**Acme's Final Set of Reported Financials**\n\u003Cbr>\n\n| TA | Without Decision Support | Starting The Journey | Fully Integrated |\n| --- | --- | --- | --- |\n| Revenue | 68.0 | 88.4 | 138.8 |\n| Total Variable Costs (COCA) | 13.6 | 11.2 | 7.6 |\n| **Throughput** | **54.4** | **77.2** | **131.2** |\n| _Efficiency_ | _80%_ | _87%_ | _95%_ |\n|  |  | | |\n| R&D | 34.2 | 37.1 | 40.5 |\n| Marketing | 2.1 | 3.2 | 4.5 | \n| Sales | 25.6 | 28.6 | 31.9 |\n| G&A | 6.4 | 8.0 | 9.5 |\n| **Total OpEx** | **68.3** | **76.9** | **86.4** |\n| | | | |\n| **Profit** | **-13.8** | **0.3** | **44.8** |\n| | | | |\n| Inventory: Goodwill | 25.3 | 59.5 | 96.6 |\n| Inventory: Requirements | 120,868 | 117,567 | 1,176.2 |\n| **Total Inventory** | **120,893.3** | **117,626.5** | **1,272.8** |\n| | | | |\n| **Return On Investment** |  **0%** | **0.0%** | **3.5%** |\n\n\u003Cbr>\n\n## Ending On A Personal Note\n\n> “Remember that hierarchies exist to serve the bottom layers, not the top. 
Don’t maximize parts of the systems or subsystems while ignoring the whole. Aim to enhance total system properties, such as growth, stability, diversity, resilience, and sustainability - whether they are easily measured or not.” -- [Thinking In Systems](https://www.amazon.com/Thinking-Systems-Donella-H-Meadows/dp/1603580557)\n\nI acknowledge Acme's story and financials were maybe extreme and, at times, overly exaggerated. I did so to help illustrate the main themes that nearly every company I've been a part of has struggled with to varying degrees. Ultimately, the longer we continue to rely on a Value Exchange operating structure and toolchain that consists of dozens of siloed point solutions and people organizations, the longer we will be fixated on the local optimization of its parts. We need to shift our focus towards achieving the global maximum of the Value Exchange System. This will be an ongoing challenge for nearly every company as traditional organizational theory is over [250 years old](https://aboutleaders.com/10-traditional-organizational-culture-problems/#gs.ij731t) and was designed to replicate factories and machines, not dynamic systems and subsystems that are constantly evolving and changing.  \n\nAs we wrap up, you might be thinking the intelligent decision support tool I described is far fetched and a pipe dream. Topic extraction, cluster analysis, sentiment analysis, Bayesian inference, and NLP does not rely on Strong AI or necessarily adopting a whole new suite of tools. It is all achievable by a capable team of data scientists and engineers using currently available open-source packages, ML libraries, and advanced statistics leveraging data most companies already have on hand. 
As DevOps continues to take hold and organizations shift from a project to product mindset, it's just a matter of time until someone capitalizes on the tremendous opportunity there is in optimizing the feedback loops from sales, marketing, product, engineering, UX, and customer support at scale.\n\nWhen I first started at GitLab, I thought the idea of building a single application for the entire DevOps lifecycle was insane and nearly unachievable. That challenge is part of what drew me to the company. Now that I have more perspective, I'm starting to realize that maybe we've been thinking too small. Anyone up for some dogfooding?   \n\nAll posts in the series:\n\n- Part 1: [A Gentle(ish) Introduction](/blog/optimizing-the-value-exchange-a-gentle-introduction/\n)\n- Part 2: [Reduce Waste To Increase Flow](/blog/optimizing-the-value-exchange-reduce-waste-to-increase-flow/)\n- Part 3: The Compounding Value Of Shorter Feedback Loops\n\n_**Giving credit where it is due:** The Lean Sprint and Continuous Innovation Framework were developed by Ash Maurya, founder of [LeanStack](https://leanstack.com/)._\n\nCover photo by Ryan Hafey on [Unsplash](https://unsplash.com/photos/PdJjT-zYg_E)\n",[744,3034,9],{"slug":3073,"featured":6,"template":699},"optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops","content:en-us:blog:optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops.yml","Optimizing The Value Exchange The Compounding Value Of Shorter Feedback 
Loops","en-us/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops.yml","en-us/blog/optimizing-the-value-exchange-the-compounding-value-of-shorter-feedback-loops",{"_path":3079,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3080,"content":3085,"config":3092,"_id":3094,"_type":13,"title":3095,"_source":15,"_file":3096,"_stem":3097,"_extension":18},"/en-us/blog/package-key-extension",{"title":3081,"description":3082,"ogTitle":3081,"ogDescription":3082,"noIndex":6,"ogImage":803,"ogUrl":3083,"ogSiteName":685,"ogType":686,"canonicalUrls":3083,"schema":3084},"GitLab extends package signing key expiration to 2022","Our GPG key will now expire on July 1, 2021. Here's what you need to know.","https://about.gitlab.com/blog/package-key-extension","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab will extend package signing key expiration by one year\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gerard Hickey\"}],\n        \"datePublished\": \"2020-06-25\",\n      }",{"title":3086,"description":3082,"authors":3087,"heroImage":803,"date":3089,"body":3090,"category":787,"tags":3091},"GitLab will extend package signing key expiration by one year",[3088],"Gerard Hickey","2020-06-25","\n\nGitLab has a GPG key used to sign all Omnibus packages created within the [CI\npipelines](/blog/guide-to-ci-cd-pipelines/). 
This key is set to expire on 2020-07-01 and will be extended to\nexpire on 2021-07-01.\n\n## Why is this being done?\n\nThe package signing key is set to a yearly expiration time to limit the exposure\nshould the key be compromised and to comply with GitLab security practices.\nGenerating a new key each year is much more obtrusive than continually extending\nthe expiration time.\n\n## What do I need to do?\n\nThe only action that needs to be taken is to update your copy of the package\nsigning key _if_ you validate the signatures on the Omnibus packages that GitLab\ndistributes.\n\nThe package signing key is not the key that signs the repository metadata\nused by the OS package managers like `apt` or `yum`. Unless you are specifically\nverifying the package signatures or have configured your package manager to verify\nthe package signatures, there is no action needed on your part to continue\ninstalling Omnibus packages.\n\nMore information concerning [verification of the package signatures](https://docs.gitlab.com/omnibus/update/package_signatures#package-signatures)\ncan be found in the Omnibus documentation. If you just need to refresh a copy\nof the public key, then you can find it on any of the GPG keyservers by\nsearching for support@gitlab.com or using the key ID of\n`DBEF 8977 4DDB 9EB3 7D9F  C3A0 3CFC F9BA F27E AB47`. 
Alternatively you could\ndownload it directly from packages.gitlab.com using the URL:\n\n    https://packages.gitlab.com/gitlab/gitlab-ce/gpgkey/gitlab-gitlab-ce-3D645A26AB9FBD22.pub.gpg\n\n## I still have problems, what do I do?\n\nPlease open an issue in the [omnibus-gitlab issue tracker](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/new?issue&issuable_template=Bug).\n",[787,1842,9],{"slug":3093,"featured":6,"template":699},"package-key-extension","content:en-us:blog:package-key-extension.yml","Package Key Extension","en-us/blog/package-key-extension.yml","en-us/blog/package-key-extension",{"_path":3099,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3100,"content":3106,"config":3112,"_id":3114,"_type":13,"title":3115,"_source":15,"_file":3116,"_stem":3117,"_extension":18},"/en-us/blog/people-ops-using-gitlab",{"title":3101,"description":3102,"ogTitle":3101,"ogDescription":3102,"noIndex":6,"ogImage":3103,"ogUrl":3104,"ogSiteName":685,"ogType":686,"canonicalUrls":3104,"schema":3105},"GitLab People Ops: Getting drunk on our own wine","How our People Ops team uses GitLab day to day: from onboarding new GitLab team-members to keeping our handbook up to date.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678697/Blog/Hero%20Images/how-people-ops-uses-gitlab.jpg","https://about.gitlab.com/blog/people-ops-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab People Ops: Getting drunk on our own wine\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chloe Whitestone\"}],\n        \"datePublished\": \"2018-05-25\",\n      }",{"title":3101,"description":3102,"authors":3107,"heroImage":3103,"date":3109,"body":3110,"category":811,"tags":3111},[3108],"Chloe Whitestone","2018-05-25","\nWe’ve heard people say \"[every company is a software 
company](https://www.forbes.com/sites/techonomy/2011/11/30/now-every-company-is-a-software-company/#5761b57cf3b1),\" but what about the people who work there? At GitLab, we [drink our own wine](/company/culture/), and that means all of our team members, in some way or another, are technical because we use GitLab ourselves. In [People Ops and recruiting](/handbook/people-group/), I use GitLab every day; just take a look at my [activity chart](https://gitlab.com/chloe)!\n\n![Chloe's GitLab Activity Chart](https://about.gitlab.com/images/blogimages/gitlab-chloe.png){: .shadow.medium.center}\n\nThese blue squares represent contributions I’ve made across the GitLab project (and the white ones prove that work/life balance exists!).\n\n## Getting started with issues\n\nA good portion of those blue squares are dedicated towards issues, specifically pre-established template issues, such as [the onboarding issue](https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/onboarding.md). This is the \"first look\" our new hires have into GitLab and our workflow, and it’s a fantastic way to get them using issues, and thus GitLab the product, right away. One of the tasks in this issue is \"add yourself to the [team page](/company/team/),\" so within the first week at GitLab, all team members submit a merge request, even if they’ve never coded before. 
Another task is to \"make an improvement to the handbook,\" which both encourages new hires to submit another merge request and to explore our handbook and adopt our ethos of \"everyone can contribute.\"\n\n>within the first week at GitLab, all team members submit a merge request, even if they’ve never coded before\n\nOther issue templates we have and use regularly are [offboarding](https://gitlab.com/gitlab-com/people-group/employment-templates/-/blob/main/.gitlab/issue_templates/offboarding.md) and [opening new vacancies](https://gitlab.com/gitlab-com/people-ops/vacancy/blob/master/.gitlab/issue_templates/vacancy.md). People Ops uses these issue templates to maintain version control, enable everyone to contribute, and allow us to continually iterate and improve on how we onboard our new hires, all of which promote the GitLab [values](https://handbook.gitlab.com/handbook/values/).\n\nWe constantly iterate on all of our issue templates, predominantly the onboarding issue template mentioned above. You can view its [history](https://gitlab.com/gitlab-com/people-ops/employment/commits/master/.gitlab/issue_templates/onboarding.md) and see how everyone at the company iterates on our onboarding issue – not just People Ops, but also new hires and seasoned GitLab team-members. You can also view some of the ideas we’re working through in the [\"Overhaul onboarding for Ta-NEW-kis\" issue](https://gitlab.com/gitlab-com/people-ops/General/issues/105), and feel free to contribute your own ideas!\n\n## Transparent by default\n\nPeople Ops and HR departments are not typically considered transparent at most companies, but here at GitLab we try our best to be as transparent as possible. The only times we keep things confidential are when we are legally required to, or to protect someone’s privacy. Everything else is fair game! 
Some great examples in our handbook are our [identity data](/company/culture/inclusion/identity-data/), [internal feedback](/company/culture/internal-feedback/), and the questions we ask in our [screening calls with candidates](/handbook/hiring/interviewing/#screening-call). We make it a point to keep this data, as well as other handbook pages dedicated to People Ops and recruiting, up to date and accurate.\n\n## Everyone can contribute\n\nWe encourage our team members and the wider GitLab community to contribute and give us their ideas because they will have a fresh look and unique perspective, which can only improve our own understanding.\n\nI remember when I joined GitLab a year ago, I interviewed with [Sid Sijbrandij](/company/team/#sytses), our CEO, and he asked me what I wanted to accomplish within my first month at GitLab. I told him I wanted to become proficient in Git so that I could properly contribute, and he was surprised! But I was steadfast, and within my first two weeks, I’d already started contributing via my local machine. Sure, I’m not a developer by any means, but I use Git every day, have figured out quite a few things both on my own, and with the help of our #git-help Slack channel, was even granted merge powers last year! 
Here at GitLab, everyone can contribute, no matter what your background is.\n\nPhoto by [Maxime Le Conte des Floris](https://unsplash.com/) on Unsplash\n{: .note}\n",[790,696,9],{"slug":3113,"featured":6,"template":699},"people-ops-using-gitlab","content:en-us:blog:people-ops-using-gitlab.yml","People Ops Using Gitlab","en-us/blog/people-ops-using-gitlab.yml","en-us/blog/people-ops-using-gitlab",{"_path":3119,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3120,"content":3126,"config":3131,"_id":3133,"_type":13,"title":3134,"_source":15,"_file":3135,"_stem":3136,"_extension":18},"/en-us/blog/personas-and-empathy-building",{"title":3121,"description":3122,"ogTitle":3121,"ogDescription":3122,"noIndex":6,"ogImage":3123,"ogUrl":3124,"ogSiteName":685,"ogType":686,"canonicalUrls":3124,"schema":3125},"How we use personas to build empathy for different types of users","Welcome to our series on the new GitLab personas!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678789/Blog/Hero%20Images/how-we-use-personas-to-gain-empathy.jpg","https://about.gitlab.com/blog/personas-and-empathy-building","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we use personas to build empathy for different types of users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Katherine Okpara\"}],\n        \"datePublished\": \"2018-10-12\",\n      }",{"title":3121,"description":3122,"authors":3127,"heroImage":3123,"date":3128,"body":3129,"category":832,"tags":3130},[2512],"2018-10-12","\nLast year we discussed our motivations for using personas at GitLab, including [why they're important](/blog/the-importance-of-ux-personas/) and how to [create them through UX research](/blog/discovering-gitlabs-personas/). Since then, our teams have had many conversations about improving the design of our product and continuing to empathize with our users. 
As a result, we created an initiative to fully incorporate personas into our design process. This will help everyone learn more about the different people who use GitLab!\n\n#### What’s New\nWe’ve made several changes in format since the first iteration of personas:\n\n- Gender-neutral name: humanizing the persona while still ensuring that it is inclusive\n- Job description: helping your audience learn about what the user does and who they work with\n- “Jobs-to-be-done” (JTBD) framework: making the information more concise and easier to digest\n- Alternative job titles: understanding how the research findings apply to other user groups with similar needs and challenges\n\nTypically, the most insightful personas are a realistic representation of user needs. They help you understand who you’re designing for and allow other people in all departments of your company to hear directly from users. In this series, we’ll share findings from our recent round of research and highlight what we’ve learned about each role.\n\n#### Want to learn more?\nYou can now view the personas [in our handbook](https://handbook.gitlab.com/handbook/product/personas/). Here's a quick summary of what's inside:\n* [Parker, Product Manager](https://handbook.gitlab.com/handbook/product/personas/#parker-product-manager)\n* [Delaney, Development Team Lead](https://handbook.gitlab.com/handbook/product/personas/#delaney-development-team-lead)\n* [Devon, DevOps Engineer](https://handbook.gitlab.com/handbook/product/personas/#devon-devops-engineer)\n* [Sasha, Software Developer](https://handbook.gitlab.com/handbook/product/personas/#sasha-software-developer)\n* [Sydney, Systems Administrator](https://handbook.gitlab.com/handbook/product/personas/#sidney-systems-administrator)\n* [Sam, Security Analyst](https://handbook.gitlab.com/handbook/product/personas/#sam-security-analyst)\n\nHow does your team use personas in the design process? 
Connect with us [@gitlab](https://twitter.com/gitlab), and stay tuned for the next posts, where we’ll dive deep into the findings, limitations, and opportunities of each.\n\n[Photo](https://unsplash.com/photos/fgiFAtH0QBU) by [gabrielle cole](https://unsplash.com/@gabriellefaithhenderson) on Unsplash.\n{: .note}\n",[789,9],{"slug":3132,"featured":6,"template":699},"personas-and-empathy-building","content:en-us:blog:personas-and-empathy-building.yml","Personas And Empathy Building","en-us/blog/personas-and-empathy-building.yml","en-us/blog/personas-and-empathy-building",{"_path":3138,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3139,"content":3144,"config":3149,"_id":3151,"_type":13,"title":3152,"_source":15,"_file":3153,"_stem":3154,"_extension":18},"/en-us/blog/positive-outcomes-ci-cd",{"title":3140,"description":3141,"ogTitle":3140,"ogDescription":3141,"noIndex":6,"ogImage":1171,"ogUrl":3142,"ogSiteName":685,"ogType":686,"canonicalUrls":3142,"schema":3143},"4 Benefits of CI/CD","Learn how to implement and measure a successful CI/CD pipeline strategy and help your DevOps team deliver higher quality software, faster!","https://about.gitlab.com/blog/positive-outcomes-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 Benefits of CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-06-27\",\n      }",{"title":3140,"description":3141,"authors":3145,"heroImage":1171,"date":3146,"body":3147,"category":718,"tags":3148},[1113],"2019-06-27","\n[CI/CD](/topics/ci-cd/) helps DevOps teams ship higher quality software, faster, for improved software deployment. But is all [CI/CD](/topics/ci-cd/) created equal? 
What do the benefits of continuous integration, continuous delivery, and continuous deployment look like and how do you know you're on the right track?\n\nIn this four-part series, we talk about modernizing your CI/CD: Challenges, impact, outcomes, and solutions. In [part one](/blog/modernize-your-ci-cd/), we focused on common CI/CD challenges. In [part two](/blog/business-impact-ci-cd/), we talked about the revenue impacts. Today, we’ll talk about what CI/CD can deliver and how to measure its success.\n\nIf these problems hit a little too close to home, stay tuned for part four where we dive deeper into finding the right CI/CD solution for you.\n\n## What are some of the benefits of a good CI/CD strategy?\n\n### 1. Increased speed of innovation and ability to compete in the marketplace\n\nTwo identical companies: One implements [CI/CD technology](/topics/ci-cd/) and the other doesn’t. Who do you think deploys applications faster? While this seems like a silly comparison, because _of course_ the company with more automation deploys faster, there are organizations out there still convinced they don’t need CI/CD because they’re not looking at their competition. Organizations that understand the importance of CI/CD are setting the pace of innovation for everyone else.\n\n### 2. Code in production is making money instead of sitting in a queue waiting to be deployed\n\nOrganizations that have implemented CI/CD are making revenue, satisfying customers, and getting user feedback on the product features they deploy, not waiting for a manual check to see if the code is up to par. They already know the code is good because they have tests that are automated, and continuous delivery means that code is deployed automatically if it meets certain standards. They’ve removed human error and delays from the process so they can ship more code to production.\n\n### 3. 
Great ability to attract and retain talent\n\nEngineers that can focus on what they’re best at will be happier and more productive, and that has far-reaching impact. Turnover can be expensive and disruptive. A good CI/CD strategy means engineers can work on important projects and not worry about time-consuming manual tasks. They can also work confidently knowing that errors are caught automatically, not right before deployment. This kind of cooperative engineering culture inevitably attracts talent.\n\n### 4. Higher quality code and operations due to specialization\n\nThe development team can focus on dev. The operations team can focus on ops. Bad code rarely makes it to production because continuous testing is automated. Developers can focus on the code rather than the production environment, and operations doesn’t have to feel like a gatekeeper or a barrier. Both teams can work to their strengths, and automated handoffs make for seamless processes for the entire team. [This kind of cooperation makes DevOps possible](/topics/devops/build-a-devops-team/) and improves code quality.\n\n## What capabilities are required to make this happen?\n\n### 1. Robust CI/CD\n\nWhen we use the term “robust,” it’s all about avoiding half-baked or partial solutions. There are several CI/CD solutions out there but there are varying degrees of effectiveness. Continuous integration and continuous delivery go hand in hand, so having a solution that offers both is ideal. The tool you use should offer the automation you need, not just some. If your CI/CD tool is prone to failure or “brittle,” it can be just one more thing to manage. This was precisely why [the team at Ticketmaster replaced Jenkins CI and moved to weekly releases](/blog/continuous-integration-ticketmaster/), decreasing their pipeline execution time from two hours to only _eight minutes_ to build, test, and publish artifacts.\n\n### 2. 
Containers and Kubernetes\n\nContainers have made a huge impact on the way companies build and deploy code. While it was once difficult to develop applications with a [microservices architecture](/blog/strategies-microservices-architecture/), over the past five years it has become considerably easier with container orchestration tools like Kubernetes, comprehensive CI/CD tools that automate testing and deployments, and APIs that update automatically. Breaking up services so they can run independently reduces dependencies and creates better workflows.\n\n### 3. Functionality for the entire DevOps lifecycle\n\nVisibility is a huge asset when improving DevOps workflows. For some teams, they can have several tools handling different facets of the software development lifecycle (SDLC), which creates integration issues, maintenance issues, visibility issues, and is [just plain expensive](/calculator/roi/) from a cost standpoint. A complex toolchain can also weaken security. In a [Forrester survey of IT professionals](/resources/downloads/201906-gitlab-forrester-toolchain.pdf), 45% said that they had difficulty ensuring security across the toolchain.\n\n## How would you measure the success of a CI/CD strategy?\n\n### 1. Cycle time\n\nCycle time is the speed at which a [DevOps team](/topics/devops/) can deliver a functional application, from the moment work begins to when it is providing value to an end user.\n\n### 2. Time to value\n\nOnce code is written, how long before it’s released? This delay from when code is written to running in production is the time to value, and is a bottleneck for many organizations. Continuous delivery as well as [examining trends in the QA process](/blog/trends-in-test-automation/) can help to overcome this barrier to quick deployments and frequent releases.\n\n### 3. 
Uptime, error rate, infrastructure costs\n\nUptime is one of the biggest priorities for the ops team, and with a good CI/CD strategy that automates different processes, they should be able to focus more on that goal. Likewise, error rates and infrastructure costs can be easily measured once CI/CD is put in place. Operations goals are a key indicator of process success.\n\n### 4. Team retention rate\n\nHappy developers stick around, so looking at retention rates is a reliable way to gauge how well new development processes and applications are working for the team. It might be tough for developers to speak up if they don’t like how things are going, but looking at retention rates can be one step in identifying potential problems.\n\nThe benefits of a good CI/CD strategy are felt throughout an organization: From HR to operations, teams work better and achieve goals. In such a competitive development landscape, having the right CI/CD in place gives any company an edge.\n\nSo what makes “good” CI/CD? 
We invite you to compare GitLab CI/CD to other CI tools and see why we were rated #1 in the Forrester CI Wave™.\n",[722,108,9],{"slug":3150,"featured":6,"template":699},"positive-outcomes-ci-cd","content:en-us:blog:positive-outcomes-ci-cd.yml","Positive Outcomes Ci Cd","en-us/blog/positive-outcomes-ci-cd.yml","en-us/blog/positive-outcomes-ci-cd",{"_path":3156,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3157,"content":3163,"config":3169,"_id":3171,"_type":13,"title":3172,"_source":15,"_file":3173,"_stem":3174,"_extension":18},"/en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"title":3158,"description":3159,"ogTitle":3158,"ogDescription":3159,"noIndex":6,"ogImage":3160,"ogUrl":3161,"ogSiteName":685,"ogType":686,"canonicalUrls":3161,"schema":3162},"Postman integration with GitLab makes API workflows easier","Learn how to use the git integration to link APIs in Postman to GitLab cloud repos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671434/Blog/Hero%20Images/introducing-continuous-workflows.jpg","https://about.gitlab.com/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Postman integration with GitLab makes API workflows easier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andy Rogers\"}],\n        \"datePublished\": \"2022-08-24\",\n      }",{"title":3158,"description":3159,"authors":3164,"heroImage":3160,"date":3166,"body":3167,"category":832,"tags":3168},[3165],"Andy Rogers","2022-08-24","\n\nAPIs are more than just an interface. From a development lifecycle perspective, an API includes source code, definition files, tests, performance measurements, documentation, security audits, deployments, and feedback from API consumers. All of these elements are required for a successful API implementation. 
So, in partnership with GitLab, Postman created a git integration that allows users to link APIs in Postman to their GitLab cloud repos (on-prem versions of GitLab are only supported on [Postman Enterprise](https://www.postman.com/pricing/)).\n\nThe [Postman API Platform](https://blog.postman.com/new-postman-api-platform-redefining-api-management-for-api-first-world/) is designed to help teams collaborate seamlessly by providing tools for the entire API lifecycle. We understand that a fundamental part of the API lifecycle includes [developer workflows](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) centered around code and source control.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman1.png){: .shadow}\n\n## 4 key benefits for better collaboration\n\nThe launch of this integration earlier in the year provides four key benefits that empower teams to work faster and better together:\n\n**1.** It introduces the concept of version control into Postman. Users are now able to manage and sync branches, releases, versions, and tags for their APIs in GitLab and Postman. \n\n\n![screenshot of drop-down menu](https://about.gitlab.com/images/blogimages/postman2.png){: .shadow}\n\n\n**2.** Elements created in Postman can be pushed to a user’s GitLab repository, where the schema and collections can coexist alongside the source code. Likewise, branching workflows that your team might already be using can now be followed in Postman; external changes to code and API definitions are reviewable and can be merged back to Postman.\n\n\n![screenshot of branch info](https://about.gitlab.com/images/blogimages/postman3.png){: .shadow}\n\n**3.** This integration enables developers to think about API elements as the API itself, instead of treating code, API definitions, documentation, collections, tests, monitors, etc. as independent entities. All of these constitute the API. 
Moreover, this allows a higher-level view of the entire API, rather than just the source code — a critical requirement for any organization who wants to build a structured and robust API program.\n\n\n![screenshot of API info](https://about.gitlab.com/images/blogimages/postman4.png){: .shadow}\n\n\n**4.** The Postman-GitLab integration greatly minimizes the likelihood that downstream teams and API consumers will interact with outdated (or even deprecated) APIs or API elements. Users don’t have to spend time deciphering what API, collection, or documentation is current, since they can see what version they are working with all the way back to the code. In Postman, users also have direct access to real-time collaborative tools such as commenting and forking/merging to maintain synchronization between downstream API consumption and the source of truth.\n\n![illustration](https://about.gitlab.com/images/blogimages/postman5.png){: .shadow}\n\n## An integration for the API-first world\n\nOur partnership with GitLab supports our commitment to building Postman as the platform for the [API-first world](https://api-first-world.com/). With integrations like this, [API-first companies](https://blog.postman.com/what-is-an-api-first-company/) are now more productive, can deliver higher-quality products, and are able to build stronger ecosystems of developers, partners, and consumers. 
\n\nTo get started with the GitLab integration, check out [our guide](https://blog.postman.com/the-reimagined-api-first-workflow-for-developers/) and our how-to video for GitLab integration config:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/BL8DFOPncMc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n_Andy Rogers is product manager at Postman._\n\n\n",[722,9,232],{"slug":3170,"featured":6,"template":699},"postman-integration-with-gitlab-makes-your-api-workflows-easier","content:en-us:blog:postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","Postman Integration With Gitlab Makes Your Api Workflows Easier","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier.yml","en-us/blog/postman-integration-with-gitlab-makes-your-api-workflows-easier",{"_path":3176,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3177,"content":3183,"config":3188,"_id":3190,"_type":13,"title":3191,"_source":15,"_file":3192,"_stem":3193,"_extension":18},"/en-us/blog/pre-commit-post-deploy-is-dead",{"title":3178,"description":3179,"ogTitle":3178,"ogDescription":3179,"noIndex":6,"ogImage":3180,"ogUrl":3181,"ogSiteName":685,"ogType":686,"canonicalUrls":3181,"schema":3182},"Pre-commit and post-deploy code reviews are dead","In a world with Git, pre-commit and post-deploy code reviews are relics that can be eliminated from your workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678861/Blog/Hero%20Images/pre-commit.jpg","https://about.gitlab.com/blog/pre-commit-post-deploy-is-dead","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pre-commit and post-deploy code reviews are dead\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-31\",\n      
}",{"title":3178,"description":3179,"authors":3184,"heroImage":3180,"date":3185,"body":3186,"category":832,"tags":3187},[1680],"2019-01-31","\nPre-commit and post-deploy reviews have been the industry standard for ensuring that code is functioning as intended. But with Git around, are these methods still needed?\n\nLet’s take a step back and look at how they work.\n\n### Pre-commit reviews require that code is checked for bugs before it is committed\n\nOur CEO [Sid Sijbrandij](/company/team/#sytses) says pre-commit reviews makes sense because new code is evaluated before it is introduced into the code base. But with distributed version control, he says, you can essentially [do the same thing on Git branches](https://docs.gitlab.com/ee/topics/gitlab_flow.html). Prior to Git, branches were too pricey to use regularly in [version control systems](/topics/version-control/) like Subversion.\n\n### Post-deploy reviews periodically check for areas of improvement in the code base\n\nPost-deploy reviews are typically done on a periodic basis as a way to check certain areas of the code base and decide if improvements can be made. This method doesn’t make sense, according to Sid, because \"The code has already proven itself in production ... so you’re reluctant to make changes to it.\" Additionally, the idea of occasionally reviewing your code base is not really needed:\n\n\"If there's technical debt in there, at least it's not affecting other code,\" Sid explains. \"There's a certain interest you pay on technical debt, and it has to do with how much it spreads the technical debt to your code base. Code that is not doing much, meaning it's being executed but it's not changing much, well at least it's not influencing other code. You're always going to have tech debt, and you're always going to have a limited time during which you can review and fix things. 
Focus on the code that's active, that's probably the best place to focus.\"\n\n### Git branches are more efficient\n\nUsing Git branches to ensure that code is safe to introduce into the code base improves efficiencies when compared to pre-commit and post-deploy reviews, says Sid, who finds the former to be hard to track.\n\n\"Pre-commit code reviews were a bit awkward because you didn't have a good way to refer to it. It was in the tool, but you didn't have a SHA or definite way to refer to that version. And it was hard to know what CI it ran against because there wasn't a SHA. So by doing it post-commit, you have it in versions and it's much easier to see what you referred to. But with code review after deploy, the mindset was, 'If it works, you move on.'\n\n> \"If you change it, there's extra risk; if you don't change it, it's extra tech debt – and you always have to choose between the two.\"\n\n\"You're not going to be as vigilant to technical debt building up and it's harder to request that someone change something that’s working. If you change it, there's extra risk; if you don't change it, it's extra tech debt – and you always have to choose between the two. With pre-deploy code reviews, you don't have to make that choice …  [With what we have now], I think pre-commit and post-deploy code reviews are dead, and code should be reviewed on a branch before it's deployed to production.\"\n\nWhat do you think: Are pre-commit and post-deploy reviews a thing of the past? 
Tweet us @GitLab!\n{: .alert .alert-gitlab-purple.text-center}\n\nPhoto by [Caspar Camille Rubin](https://unsplash.com/photos/fPkvU7RDmCo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on Unsplash\n{: .note}\n",[1385,1684,9],{"slug":3189,"featured":6,"template":699},"pre-commit-post-deploy-is-dead","content:en-us:blog:pre-commit-post-deploy-is-dead.yml","Pre Commit Post Deploy Is Dead","en-us/blog/pre-commit-post-deploy-is-dead.yml","en-us/blog/pre-commit-post-deploy-is-dead",{"_path":3195,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3196,"content":3201,"config":3206,"_id":3208,"_type":13,"title":3209,"_source":15,"_file":3210,"_stem":3211,"_extension":18},"/en-us/blog/project-management-using-gitlab-platform",{"title":3197,"description":3198,"ogTitle":3197,"ogDescription":3198,"noIndex":6,"ogImage":1854,"ogUrl":3199,"ogSiteName":685,"ogType":686,"canonicalUrls":3199,"schema":3200},"Can DevOps and project management co-exist? Yes, on the daily at GitLab","Stay agile by using GitLab for DevOps project management","https://about.gitlab.com/blog/project-management-using-gitlab-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Can DevOps and project management co-exist? Yes, on the daily at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2021-05-11\",\n      }",{"title":3197,"description":3198,"authors":3202,"heroImage":1854,"date":3203,"body":3204,"category":832,"tags":3205},[808],"2021-05-11","\n\nGitLab is best known as an all-in-one DevOps platform, but it is also an effective tool for project management. 
Non-technical teams at GitLab, such as [the Marketing team](/blog/gitlab-for-project-management-one/), use the GitLab DevOps platform for project management, and recently the Alliances team learned that DevOps and project management work well for our purposes.\n\n## About the IBM partnership\n\n[GitLab recently launched a partnership with IBM](/press/releases/2021-01-14-gitlab-IBM-to-support-acceleration-of-devops-automation.html) to help the organization automate their DevOps platform. Since I work on the Alliances team, I needed an efficient, compatible, and high-performance project management application to manage the many moving parts of the GitLab and IBM partnership as well as other projects related to our partnerships.\n\nMy very first instinct was to test a few of the project management web applications on the market, but this would involve a tedious process of convincing my colleagues to join me on this journey to explore a sprawling new set of tools. Then I thought why not explore our own Gitlab DevOps platform as a project management tool? The beauty of GitLab is that it is a [DevOps platform](https://www.youtube.com/watch?v=wChaqniv3HI) delivered as a single easy-to-use application.\n\nSome of my early questions were:\n\n- Can the GitLab DevOps platform work as a project management tool for the strategic Alliance team?\n- Can GitLab manage and track business activities over a period of time?\n- Can team members collaborate and manage various projects using a single application?\n\nIn the end, the journey to adopting GitLab as a DevOps platform and project management tool was similar to the journey many of our customers experience. In this blog post, I will dive deeper into how the Alliance team uses GitLab for project management, explain how we used GitLab to onboard a new strategic partner, and launched support of [GitLab Ultimate for IBM Cloud Paks](https://www.ibm.com/products/gitlab-ultimate). 
All the pre- and post-onboarding activities in particular required collaboration and contributions from various teams across the organization.\n\n## Applying DevOps features to project management\n\n### About epics and roadmaps\n\nWhy organize work into a hierarchy? I began the strategic partnership effort by organizing the work into multi-level epics. The [idea behind epics is to aggregate similar work](https://docs.gitlab.com/ee/user/group/epics/#epics) (or issues) into epics and manage delivery of work. In the example below, you'll see the top-level epic was called \"IBM cloud paks\" which contained three child epics.\n\n![An example of a multi-level epics from the IBM cloud paks project](https://about.gitlab.com/images/blogimages/proj-mgmt-epic.png){: .shadow.medium.center}\nWork is divided into three time-bound levels for the IBM cloud paks project: Pre-launch, 0-90 days, and 90-180 days.\n{: .note.text-center}\n\nAnother way to represent the epics is through a [roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/#roadmap). The main advantage of this feature is that it allows the collaborators on epics and issues to monitor project progress using a calendar timeline view.\n\n![An example of a project management timeline for the IBM cloud paks project using the epics roadmap view](https://about.gitlab.com/images/blogimages/proj-mgmt-timeline.png){: .shadow.medium.center}\nThe same IBM cloud paks project epic is depicted using the Roadmap view, which adopts a timeline view.\n{: .note.text-center}\n\n### How issues are used to capture work\n\nClick into any of the epics to find a set of issues that make up the epic. I use [issues as the basic unit of work](https://docs.gitlab.com/ee/user/project/issues/). 
Contained within the \"IBM cloud paks: Pre-launch\" epic are 33 issues.\n\n![The list view shows inside the \"IBM cloud paks: Pre-launch\" epic are 33 issues](https://about.gitlab.com/images/blogimages/proj-mgmt-issue.png){: .shadow.medium.center}\nInside the \"IBM cloud paks: Pre-launch\" epic are 33 issues\n{: .note.text-center}\n\nOne thing to note is that an issue can have a single assignee or owner, or it can have multiple assignees.\n\n### How to use issue boards\n\nAn [agile board](/blog/gitlab-for-agile-portfolio-planning-project-management/) can help a user visualize work and manage all the open threads in a given epic and/or project. The board can help you move issues efficiently through various phases of work. On the Alliances team, we are always iterating on how to better track the status of issues. [Here is more information about the current status flows for the Alliances team](/handbook/alliances/#status-alliance---status--status).\n\nThe screenshot below shows how an [issue board can be applied as a Kanban board by filtering for the \"IBM\" label](https://docs.gitlab.com/ee/user/project/issue_board.html#issue-boards). 
To see transitions between work stages, use [scoped labels](https://docs.gitlab.com/ee/user/project/labels.html#scoped-labels), which are mutually exclusive and represent transitions between various workflow statuses, such as \"status::1\" and \"status::2\"\n\n![Kanban board showing how labels can be used to organize issues into work stages](https://about.gitlab.com/images/blogimages/proj-mgmt-board.png){: .shadow.medium.center}\nHow we use boards for the IBM cloud paks project.\n{: .note.text-center}\n\n### Milestones help time-box events\n\nWhile an epic is a collection of related issues, [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), and sub-epics and is generally used to scope a long-running initiative or program (e.g., a marketing campaign or a new product category) epics can also contain smaller, more discrete and timeboxed events, such as monthly releases or calendar quarters. These [timeboxes are represented as Milestones](https://docs.gitlab.com/ee/user/project/milestones/), which roll up issues and merge requests in the same way as higher-level epics. Apply the \"Milestone view\" to track progress on the smaller deliverables within an epic.\n\n![Milestone view showing Alliances team projects](https://about.gitlab.com/images/blogimages/proj-mgmt-milestone.png){: .shadow.medium.center}\nHow milestones can be used to track work progress within a specific time frame.\n{: .note.text-center}\n\n### How Milestone burnup and burndown charts chart progress\n\n[Burnup and burndown charts are used by project managers to measure progress](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html). Burndown charts analyze how much work is left in a project before it can be finished successfully. Burnup charts measure the work that has been done against the total work for the project. Both types of charts are available in the GitLab DevOps platform. 
I relied mostly on epics and milestones to track work progress for the IBM partnership.\n\n![burndown](https://about.gitlab.com/images/blogimages/proj-mgmt-burndown.png){: .shadow.medium.center}\nThe burdown and burnup charts for the IBM cloud paks partnership project.\n{: .note.text-center}\n\n### Inside analytics and insights project management tools\n\nMost project management tools are great at capturing project details, and can help answer questions such as \"where does the project stand on actual vs. planned activities?\" or can help track progress using milestones and due dates. [Project analytics and insights dashboards](https://docs.gitlab.com/ee/user/analytics/#project-level-analytics) are built into the GitLab DevOps platform. There are many built-in analytics dashboards, such as CI/CD, code review, merge requests, and issues. For the IBM partnership project, I used the [issues dashboard analytics](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html) to see how many issues were opened compared to how many issues were closed. This tool helped me manage the team capacity and identify any bottlenecks in the project.\n\n![The insights dashboard shows how many issues were opened and closed](https://about.gitlab.com/images/blogimages/proj-mgmt-insights.png){: .shadow.medium.center}\nThe insights dashboard shows many issues were opened vs. how many issues were closed each month.\n{: .note.text-center}\n\n[Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/) is a particularly unique feature of GitLab's analytics suite. 
Since GitLab is a complete DevOps platform with a single data store, GitLab can automatically generate reports to not only identify high-level metrics and blockers, but also drill down into those blockers and improve value flow with just a few clicks.\n\n![Showing recent project activity: 32 new issues and 19 commits](https://about.gitlab.com/images/blogimages/proj-mgmt-analysis.png){: .shadow.medium.center}\nAnalytics showing recent project activity.\n{: .note.text-center}\n\nThe Value Stream Analytics provides a high-level view into common stages of the SDLC out-of-the-box, making it easier to monitor the overall workflow from discussion to code changes, through review and collaboration, and out to production – with no additional work required. And since the code changes and collaboration are happening within GitLab, just one click on an item will take you to the blocked issue or merge request, so you can comment, reassign, or contribute to move things along.\n\nSince all the necessary data is already in GitLab's system, customizing Value Stream Analytics can be completed in just a few clicks: Hiding and reordering stages and even creating your own with simple drop-down menus.\n\n![The customized value stream shows the average amount of time spent in the selected stage for each item](https://about.gitlab.com/images/blogimages/proj-mgmt-valuestream.png){: .shadow.medium.center}\nThe custom value stream above shows the number of days to completion.\n{: .note.text-center}\n\n## DevOps platform and project management in one\n\nThere are many project management tools in the marketplace and solutions for managing the SDLC of a project. The GitLab DevOps platform and project management tool satisfied my need to track partnership-related activities while also managing the technical demos and workshops developed for the IBM partnership. 
I look forward to continuing to explore the constantly-evolving GitLab platform to grow and manage our strategic partnerships on the Alliances team.\n\nCover image by [Martin Sanchez](https://unsplash.com/@martinsanchez?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/MD6E2Sv__iA)\n{: .note.text-center}\n",[744,696,834,1241,9],{"slug":3207,"featured":6,"template":699},"project-management-using-gitlab-platform","content:en-us:blog:project-management-using-gitlab-platform.yml","Project Management Using Gitlab Platform","en-us/blog/project-management-using-gitlab-platform.yml","en-us/blog/project-management-using-gitlab-platform",{"_path":3213,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3214,"content":3220,"config":3226,"_id":3228,"_type":13,"title":3229,"_source":15,"_file":3230,"_stem":3231,"_extension":18},"/en-us/blog/protecting-manual-jobs",{"title":3215,"description":3216,"ogTitle":3215,"ogDescription":3216,"noIndex":6,"ogImage":3217,"ogUrl":3218,"ogSiteName":685,"ogType":686,"canonicalUrls":3218,"schema":3219},"How to limit access to manual pipeline gates and deployments using GitLab","Let's look at how to use protected environments to set up access controls for production deployments and manual gates.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681105/Blog/Hero%20Images/protect_manual_jobs.jpg","https://about.gitlab.com/blog/protecting-manual-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to limit access to manual pipeline gates and deployments using GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Thao Yeager\"}],\n        \"datePublished\": \"2020-02-20\",\n      }",{"title":3215,"description":3216,"authors":3221,"heroImage":3217,"date":3223,"body":3224,"category":832,"tags":3225},[3222],"Thao Yeager","2020-02-20","This blog post was originally published on 
the GitLab Unfiltered\nblog. It was reviewed and republished on\n2020-02-21.\n\n{: .alert .alert-info .note}\n\n\nIn our world of automation, why would anyone want to do something manually?\nManual has become almost synonymous with inefficient. But, when it comes to\nCI/CD pipelines, a properly configured **manual** job can be a powerful way\nto control deployments and satisfy compliance requirements. Let’s take a\nlook at how manual jobs can be defined to serve two important use cases:\nControlling who can deploy, and setting up manual gates.\n\n\n## Limit access to deploy to an environment\n\n\nDeploying to production is a mission-critical occurence that should be\nprotected. Projects with a Kubernetes cluster could benefit from moving to a\ncontinuous deployment (CD) model in which a [branch or merge request, once\nmerged, is auto-deployed to\nproduction](https://docs.gitlab.com/ee/topics/autodevops/index.html#auto-deploy),\nand the absence of human intervention avoids mishaps. But for projects not\nyet configured for CD, let's consider this use case: Imagine a pipeline with\na manual job to deploy to prod, which can be triggered by any user with\naccess to push code. The risk of a unplanned, unintended production\ndeployment is very real.\n\n\nFortunately, it’s possible to use [protected\nenvironments](https://docs.gitlab.com/ee/ci/environments/protected_environments/)\nto prevent just anyone from deploying to production. When [configuring a\nprotected\nenvironment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environments),\nyou can define the roles, groups, or users to whom deploy access is granted.\nThe protected environment can then be defined in a manual job to deploy\nwhich limits who can run it. 
The configuration could look something like\nthis:\n\n\n```yaml\n\ndeploy_prod:\n  stage: deploy\n  script:\n    - echo \"Deploy to production server\"\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  only:\n    - master\n```\n\n\nIn the example above, the keyword `environment` is used to reference a\nprotected environment (as [configured in project\nsettings](https://docs.gitlab.com/ee/ci/environments/protected_environments.html#protecting-environment))\nwith a list of users who can run the job, in this case deploy to the named\nenvironment. Users without access see a disabled **play** button and are\nunable to execute the job.\n\n\n## Add an approval step\n\n\nCompliance rules may specify that approval is required for certain\nactivities in a workflow, even if they aren't technically a deployment step\nthemselves. In this use case, an approval step can also be added in the\npipeline that prompts an authorized user to take action to continue. This\ncan be achieved by structuring your pipeline with an \"approve\" stage\ncontaining a special manual job – for example, the YAML to insert an\napproval stage before deployment could look like this:\n\n\n```yaml\n\nstages:\n  - build\n  - approve\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - echo Hello!\n\napprove:\n  stage: approve\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  when: manual\n  allow_failure: false\n  only:\n    - master\n\ndeploy:\n  stage: deploy\n  script:\n    - echo Hello!\n  environment:\n    name: production\n    url: https://example.com\n  only:\n    - master\n```\n\n\nIn the YAML above, `allow_failure: false` [defines the manual job as\n\"blocking\"](https://docs.gitlab.com/ee/ci/yaml/#whenmanual), which will\ncause the pipeline to pause until an authorized user gives \"approval\" by\nclicking on the **play** button to resume. 
Only the users part of that\nenvironment list will be able to perform this action. In this scenario, the\nUI view of the pipeline in the example CI configuration above would look\nlike this:\n\n\n![Pipeline view of approval stage manual\njob](https://about.gitlab.com/images/blogimages/manual_job_approve_stage_ui.png){:\n.shadow}\n\n\n## Summary\n\n\nAs illustrated in the YAML examples and image above, manual jobs defined\nwith protected environments and blocking attributes are effective tools for\nhandling compliance needs as well as for ensuring there are proper controls\nover production deployments.\n\n\nTell us how using protected environments with manual jobs has secured your\ndeployments or whether blocking manual jobs helps you meet compliance and\nauditing. [Create an issue in the GitLab project issue\ntracker](https://gitlab.com/gitlab-org/gitlab/issues/new) to share your\nfeedback with us.\n\n\nCover image by [Diane Walton](https://unsplash.com/photos/BNnzmBmnPg4) on\n[Unsplash](https://unsplash.com)\n\n{: .note}\n",[108,974,9,834,722],{"slug":3227,"featured":6,"template":699},"protecting-manual-jobs","content:en-us:blog:protecting-manual-jobs.yml","Protecting Manual Jobs","en-us/blog/protecting-manual-jobs.yml","en-us/blog/protecting-manual-jobs",{"_path":3233,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3234,"content":3240,"config":3246,"_id":3248,"_type":13,"title":3249,"_source":15,"_file":3250,"_stem":3251,"_extension":18},"/en-us/blog/rebase-in-real-life",{"title":3235,"description":3236,"ogTitle":3235,"ogDescription":3236,"noIndex":6,"ogImage":3237,"ogUrl":3238,"ogSiteName":685,"ogType":686,"canonicalUrls":3238,"schema":3239},"How to use Git rebase in real life","From fixup to autosquash here are real world ways to leverage Git rebase.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682486/Blog/Hero%20Images/rebase-in-real-life.jpg","https://about.gitlab.com/blog/rebase-in-real-life","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Git rebase in real life\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Toon Claes\"}],\n        \"datePublished\": \"2022-11-08\",\n      }",{"title":3235,"description":3236,"authors":3241,"heroImage":3237,"date":3243,"body":3244,"category":832,"tags":3245},[3242],"Toon Claes","2022-11-08","My colleague [Chris](/company/team/#chriscool) recently wrote about [how to\ntake advantage of Git\n\nrebase](/blog/take-advantage-of-git-rebase/). In this post we'll\n\nexplain how you can take these techniques, and apply them to daily developer\nlife.\n\n\n## Fixup\n\n\nImagine you have created a merge request, and there are some pipeline\nfailures\n\nand some comments from reviews, and suddenly your [commit\nhistory](/blog/keeping-git-commit-history-clean/) looks something\n\nlike this:\n\n\n```shell\n\n$ git log --oneline\n\n\n8f8ef5af (HEAD -> my-change) More CI fixes\n\ne4fb7935 Apply suggestion from reviewer\n\nc1a1bec6 Apply suggestion from reviewer\n\n673222be Make linter happy\n\na0c30577 Fix CI failure for X\n\n5ff160db Implement feature Y\n\nf68080e3 Implement feature X\n\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into\n'main'\n\n...\n\n```\n\n\nIn this example there are 2 commits implementing feature X and Y, followed\nby a\n\nhandful of commits that aren't useful on their own. We used the fixup\nfeature of\n\nGit rebase to get rid of them.\n\n\n### Finding the commit\n\n\nThe idea of this technique is to integrate the changes of these follow-up\n\ncommits into the commits that introduced each feature. 
This means for each\n\nfollow-up commit we need to determine which commit they belong to.\n\n\nBased on the filename you may already know which commits belong together,\nbut if\n\nyou don't you can use git-blame to find the commit.\n\n\n```shell\n\ngit blame \u003Crevision> -L\u003Cstart>,\u003Cend> \u003Cfilename>\n\n```\n\n\nWith the option `-L` we'll specify a range of a line numbers we're\ninterested in.\n\nHere `\u003Cend>` cannot be omitted, but it can be the same as `\u003Cstart>`. You can\n\nomit `\u003Crevision>`, but you probably shouldn't because you want to skip over\nthe\n\ncommits you want to rebase away. Your command will look something like this:\n\n\n```shell\n\n$ git blame 5ff160db -L22,22 app/model/user.rb\n\n\nf68080e3 22) scope :admins, -> { where(admin: true) }\n\n```\n\n\nThis tells us line `22` was touched by `f68080e3 Implement feature X`.\n\n\nNow repeat this step until you know the commit for each of the commits you\nwant\n\nto rebase out.\n\n\n### Interactive rebase\n\n\nThe next step is to start the interactive rebase:\n\n\n```shell\n\n$ git rebase -i main\n\n```\n\n\nHere you're presented with the list of instructions in your `$EDITOR`:\n\n\n``` text\n\npick 8f8ef5af More CI fixes\n\npick e4fb7935 Apply suggestion from reviewer\n\npick c1a1bec6 Apply suggestion from reviewer\n\npick 673222be Make linter happy\n\npick a0c30577 Fix CI failure for X\n\npick 5ff160db Implement feature Y\n\npick f68080e3 Implement feature X\n\n```\n\n\nNow you'll need to change these instructions to something like this:\n\n\n```text\n\nfixup 8f8ef5af More CI fixes\n\nfixup e4fb7935 Apply suggestion from reviewer\n\nfixup 673222be Make linter happy\n\npick 5ff160db Implement feature Y\n\nfixup c1a1bec6 Apply suggestion from reviewer\n\nfixup a0c30577 Fix CI failure for X\n\npick f68080e3 Implement feature X\n\n```\n\n\nAs you can see I've reordered the commits, and I've changed some occurrences\nof\n\n`pick` to `fixup`.\n\n\nThe Git rebase will process this 
list bottom-to-top. It takes each line with\n\n`pick` and uses its commit message. On each line starting with `fixup` it\n\nintegrates the changes into the commit below. When you've saved this file\nand\n\nclosed your `$EDITOR`, the Git history will look something like this:\n\n\n```shell\n\n$ git log --oneline\n\n\ne880c726 (HEAD -> my-change) Implement feature Y\n\ne088ea06 Implement feature X\n\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into\n'main'\n\n...\n\n```\n\n\n## Autosquash\n\n\nUsing autosquash can be an alternative technique to the above. First we'll\n\nuncommit all the commits we want to get rid of.\n\n\n```shell\n\ngit checkout f68080e3\n\n```\n\n\nNow all changes only exist in your working tree, and are gone from the\ncommit\n\nhistory. You can use `git add` or `git add -p` to stage all changes related\nto\n\n`e088ea06 Implement feature X`. Instead of running `git commit` or `git\ncommit -m`\n\nwe'll use the `--fixup` option:\n\n\n```shell\n\n$ git commit --fixup e088ea06\n\n```\n\n\nNow the history will look something like:\n\n\n```shell\n\n$ git log --oneline\n\n\ne744646b (HEAD -> my-change) fixup! Implement feature X\n\n5ff160db Implement feature Y\n\nf68080e3 Implement feature X\n\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into\n'main'\n\n...\n\n```\n\n\nAll remaining changes should now belong to `5ff160db Implement feature Y` so\nwe\n\ncan run:\n\n\n```shell\n\n$ git add .\n\n\n$ git commit --fixup 5ff160db\n\n\n$ git log --oneline\n\n\n18c0fff9 (HEAD -> my-change) fixup! Implement feature Y\n\ne744646b fixup! Implement feature X\n\n5ff160db Implement feature Y\n\nf68080e3 Implement feature X\n\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into\n'main'\n\n...\n\n```\n\n\nYou can now review the `fixup!` commits and if you're happy with it, run:\n\n\n```shell\n\n$ git rebase -i --autosquash main\n\n```\n\n\nYou see we provide the extra option `--autosquash`. 
This option will look\nfor\n\n`fixup!` commits and automatically reorder those and set their instruction\nto\n\n`fixup`. Normally there's nothing for you to be done now, and you can just\nclose\n\nthe instruction list in your editor. If you type `git log` now you'll see\nthe\n\n`fixup!` commits are gone.\n\n\n## Alternatives\n\n\nFinally, there are some tools that allow you to _absorb_ commits more\neasily, for\n\nexample:\n\n\n* [lib.rs/crates/git-absorb](https://lib.rs/crates/git-absorb)\n\n* [github.com/MrFlynn/git-absorb](https://github.com/MrFlynn/git-absorb)\n\n* [gitlab.com/bertoldia/git-absorb](https://gitlab.com/bertoldia/git-absorb)\n\n* [github.com/tummychow/git-absorb](https://github.com/tummychow/git-absorb)\n\n*\n[github.com/torbiak/git-autofixup](https://github.com/torbiak/git-autofixup)\n\n\n[Cover image](https://unsplash.com/photos/qAShc5SV83M) by [Yung\nChang](https://unsplash.com/@yungnoma) on [Unsplash](https://unsplash.com/).\n\n{: .note}\n",[1684,9,1035],{"slug":3247,"featured":6,"template":699},"rebase-in-real-life","content:en-us:blog:rebase-in-real-life.yml","Rebase In Real Life","en-us/blog/rebase-in-real-life.yml","en-us/blog/rebase-in-real-life",{"_path":3253,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3254,"content":3259,"config":3264,"_id":3266,"_type":13,"title":3267,"_source":15,"_file":3268,"_stem":3269,"_extension":18},"/en-us/blog/reduce-cycle-time-digital-transformation",{"title":3255,"description":3256,"ogTitle":3255,"ogDescription":3256,"noIndex":6,"ogImage":924,"ogUrl":3257,"ogSiteName":685,"ogType":686,"canonicalUrls":3257,"schema":3258},"How to reduce cycle time when faced with the digital transformation","With every industry facing change at an accelerated pace, how do you quickly deliver value to customers?","https://about.gitlab.com/blog/reduce-cycle-time-digital-transformation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"How to reduce cycle time when faced with the digital transformation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-03-19\",\n      }",{"title":3255,"description":3256,"authors":3260,"heroImage":924,"date":3261,"body":3262,"category":718,"tags":3263},[929],"2019-03-19","\n\nOver the past several years, the “hot topic” in the tech world has been digital\ntransformation, the act of accelerating software innovation to deliver value to\ncustomers at high speed. Technology and innovation create disruptions across every\nindustry – from retail to financial services – meaning everyone faces change at\na faster pace. [A recent study by the\nWorld Economic Forum](http://reports.weforum.org/digital-transformation/) found\nthat “digital transformation” impacts almost every sector and offers critical\nexamples of how mobile devices, internet of things, machine learning, and big\ndata collectively reshape our future. If you're an IT leader, you may ask yourself,\n“What is fast and how does my team go faster?”\n\n## What _is_ fast?\n\nThe first step in preparing for the digital transformation is to look at how you\nmeasure speed: cycle time.\n\niSixSigma has a great\n[definition of cycle time](https://www.isixsigma.com/dictionary/cycle-time):\n“The total time from the beginning to the end of your process, as defined by you\nand your customer. 
Cycle time includes process time, during which a unit is acted\nupon to bring it closer to an output, and delay time, during which a unit of work\nis spent waiting to take the next action.” In a nutshell, cycle time is the total\nelapsed time to move a unit of work from the beginning to the end of a physical process.\n\n>In a nutshell, cycle time is the total\nelapsed time to move a unit of work from the beginning to the end of a physical process.\n\nIt’s important to note that cycle time is not the same as\n[lead time](https://www.linkedin.com/pulse/what-lead-time-why-important-how-do-you-reduce-roland-lester/).\nCycle time tells you how efficient your development and delivery processes are,\nand lead time tells you how long customers wait for a new feature. If you have a\nlot of ideas in your backlog, you could have a short cycle time, but a long lead\ntime due to the backlog. However, if you can improve your DevOps lifecycle to\nachieve a fast cycle time, you can then rapidly respond to new business priorities.\n\n## How does your team go faster?\n\nSo, now you know how to measure speed, how do you reduce your cycle time, let\nalone your lead time?\n\n### Take stock first\n\nIt starts with understanding where your current delivery process has problems –\nwhere you’re creating\n[bottlenecks](https://about.gitlab.com/solutions/remove-bottlenecks/index.html),\nrework, or merely waiting for someone to do something. The objective of\n[value stream management](/solutions/value-stream-management/) is to define,\nmeasure, and improve the flow of value to your customers. In the case of IT and\napplication delivery, value stream management starts with your backlog of feature\nrequests and ends with the delivery of the features to your users.\n\n### Here’s a recipe to reduce cycle time:\n\n1. Measure your cycle time and lead time (cycle time is your process and lead time is what customers see).\n1. 
Identify the bottlenecks in your value stream (those things that stretch your cycle time).\n1. Improve your processes, automate, and streamline your value stream.\n1. Repeat step 1.\n\nIf you’re concerned about how the digital transformation will impact your business, I\nhighly recommend the\n[Digital Transformation Initiative Executive Summary](http://reports.weforum.org/digital-transformation/wp-content/blogs.dir/94/mp/files/pages/files/dti-executive-summary-20180510.pdf),\na fantastic report that’ll provide you with a comprehensive understanding of how\nit will create business value. As you improve your cycle time, you’ll be able to\nlower your lead time, because your delivery processes will be faster and more\nefficient. The key is to measure, understand, and improve your process.\n\nAre you ready to tackle the digital transformation? [Just commit.](/blog/strategies-to-reduce-cycle-times/)\n",[9,722],{"slug":3265,"featured":6,"template":699},"reduce-cycle-time-digital-transformation","content:en-us:blog:reduce-cycle-time-digital-transformation.yml","Reduce Cycle Time Digital Transformation","en-us/blog/reduce-cycle-time-digital-transformation.yml","en-us/blog/reduce-cycle-time-digital-transformation",{"_path":3271,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3272,"content":3278,"config":3283,"_id":3285,"_type":13,"title":3286,"_source":15,"_file":3287,"_stem":3288,"_extension":18},"/en-us/blog/remote-development-beta",{"title":3273,"description":3274,"ogTitle":3273,"ogDescription":3274,"noIndex":6,"ogImage":3275,"ogUrl":3276,"ogSiteName":685,"ogType":686,"canonicalUrls":3276,"schema":3277},"Behind the scenes of the Remote Development Beta release","Discover the epic journey of GitLab's Remote Development team as they navigate last-minute pivots, adapt, and deliver new features for users 
worldwide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679888/Blog/Hero%20Images/remotedevelopment.jpg","https://about.gitlab.com/blog/remote-development-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes of the Remote Development Beta release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2023-08-16\",\n      }",{"title":3273,"description":3274,"authors":3279,"heroImage":3275,"date":3280,"body":3281,"category":832,"tags":3282},[1381],"2023-08-16","\nIn May 2023, the Create:IDE team faced an epic challenge – to merge the [Remote Development Rails monolith integration branch](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105783) into the `master` branch of the GitLab Project. This was no small ask, as the merge request was of considerable size and complexity. In this blog post, we'll delve into the background, justifications, and process behind this endeavor.\n\nThe merge request titled \"Remote Development feature behind a feature flag\" was initiated by the Create:IDE team, aiming to merge the branch \"remote_dev\" into the \"master\" branch in the Rails monolith GitLab project. The MR contained `4` commits, `258` pipelines, and `143` changes that amounted to a total of `+7243` lines of code added to the codebase.\n\nInitially, the MR was created to reflect the work related to \"Remote Development\" under the \"Category: Remote Development.\" It was primarily intended to have CI pipeline coverage for the integration branch and was not meant for individual review or direct merging. 
The plan was to merge this code into the master branch via the [\"Remote Development Beta - Review and merge\" Epic](https://gitlab.com/groups/gitlab-org/-/epics/10258).\n\n![SUM](https://about.gitlab.com/images/blogimages/remote-development/SUM.png){: .shadow.medium}\n\n### How the Remote Development project started\nAs a team, we embarked on an ambitious journey to create a greenfield feature: the [Remote Development](https://docs.gitlab.com/ee/user/project/remote_development/) offering at GitLab. This feature had a vast scope, many unknowns, and required solving numerous new problems. To efficiently tackle this task, we decided to work on an integration branch using a [low-ceremony process](https://stackoverflow.com/questions/68092498/what-does-low-ceremony-mean). This decision enabled us to develop and release the feature in an impressively short time frame of less than four months.\n\nWorking on an integration branch provided us the flexibility to make significant progress, but it was always intended to eventually break down the work into smaller, iterative MRs that would follow the standard [GitLab review process](https://docs.gitlab.com/ee/development/code_review.html). We had a [detailed plan](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/-/blob/main/doc/integration-branch-process.md#master-mr-process-summary) for this process, but we realized that following the original plan would not allow us to meet our goal of releasing of the feature in GitLab 16.0.\n\n### Merging the integration branch MR without breaking it up\nDuring the development of the Remote Development feature, our team faced several challenges that led us to adopt a new approach for merging the integration branch into the master. 
First, as part of our [velocity-based XP/Scrum style process](https://about.gitlab.com/handbook/engineering/development/dev/create/ide/#-remote-development-iteration-planning), we realized that meeting the 16.0 release goal would require us to cut scope. A velocity report, \"[Velocity-based agile planning report](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118436),\" highlighted that breaking down and reviewing individual MRs would take too long, considering the impending due date and the likelihood of last-minute scope additions.\n\nSecond, we [made the decision](https://gitlab.com/gitlab-org/gitlab/-/issues/398227#note_1361192858) to release workspaces as a **beta feature for public projects** for customers in [GitLab 16.0](/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects). This approach reduced the complexity of the rollout plan and allowed us to get valuable feedback earlier, but required us to enable the feature by default earlier than planned. To align with this decision, we determined that merging the integration branch after review was the best course of action. An announcement was made to explain the change in plan, and we set specific timelines for the review process to ensure smooth coordination.\n\n> Hello Reviewers/Maintainers 👋 We have opened up a Zoom room through all of next week as an easy sync place for us all to collaborate and triage questions. As the MR is quite large, it might be overwhelming to determine where to begin. To help, we will aim to furnish a summary of what we have included, such as two new database tables and a couple of GraphQL/REST APIs. We will also be available through the week in the Zoom room and without it being too prescriptive of a approach, I would suggest we do a sync walkthrough of the MR first and then kick off the reviews.\n\nAddressing the concerns about risk, team members discussed the challenges and potential solutions. 
While there were apprehensions, we were confident in the overall quality of the feature. A disciplined plan for merging MRs was initially considered, but based on our velocity metrics, it was evident that meeting the public beta release goal required a new strategy.\n\nDespite the deviations from our usual practices, we acknowledged the urgency to deliver the initial release on time. The decision was not taken lightly, and we ensured that the merge had extensive [test coverage](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) in place to address any potential issues. We accepted that some aspects would be overlooked in the initial MR review cycle, but we committed to addressing them in subsequent iterations.\n\n### Keeping the pipeline green and stable for the merge\nTo ensure the successful merge of the integration branch containing the Remote Development feature, our team made significant efforts to keep the pipeline green and stable. As the MR was quite large and contained critical functionality, it was crucial to maintain a high level of quality and reduce the risk of introducing regressions.\n\nTo address these challenges, the team adopted a disciplined approach to [CI/CD](https://about.gitlab.com/topics/ci-cd/). Throughout the development process, CI pipelines were carefully monitored, and any failing tests or issues were promptly addressed. The team conducted rigorous testing and code reviews to identify and fix potential bugs and ensure that the changes did not negatively impact the existing functionality of the codebase.\n\nAdditionally, extensive test coverage was put in place to ensure that the new feature worked as expected and did not cause unintended side effects. 
The team utilized GitLab's [test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) capabilities to track the extent of test coverage and identify areas that required additional testing.\n\n![PIPE](https://about.gitlab.com/images/blogimages/remote-development/PIPE.png){: .shadow.medium}\n\n## The merging process\nAs part of the Remote Development team, we took a strategic approach to the merging process. We identified three categories of follow-up tasks that needed to be addressed after the release:\n\n1. **To-dos:** This category encompassed follow-up issues that required further attention.\n2. **Disabled linting rules:** Any issues related to disabled linting rules were included in this category.\n3. **Follow-up from review:** Non-blocking concerns raised during the review process were categorized here.\n\nTo manage this process effectively, we organized these categories into [child epics](https://docs.gitlab.com/ee/user/group/epics/manage_epics.html#multi-level-child-epics) under the main epic representing the merging effort.\n\n1. Child epic for [to-do follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10472)\n2. Child epic for [disabled linting rules follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10473)\n3. Child epic for [follow-up issues from review](https://gitlab.com/groups/gitlab-org/-/epics/10474)\n\n\n## Reviewer resources\nDuring the integration branch merge process for the Remote Development feature, we ensured a smooth and collaborative review experience for all involved. To facilitate this, we set up the following resources and documented the information in GitLab's issue, epic, and MR reviews for better persistence and traceability:\n\n1. **Dedicated Slack channel:** We had a Slack channel that served as our primary hub for coordinating reviews and resolving any blockers that arose during the process. 
The discussions, decisions, and important points discussed in this channel were documented in the related GitLab issues and epics. This approach enabled us to maintain a historical record of the conversations for to refer back to in the future.\n2. **General Slack channel:** For non-urgent or non-blocking questions and discussions, reviewers could use the a general Slack channel. Similar to the dedicated channel, we documented the relevant information from this channel in the corresponding issues and MR reviews in GitLab.\n3. **Addressing urgent issues:** When urgent issues required immediate attention, reviewers could directly address our technical leads [Vishal Tak](https://gitlab.com/vtak) and/or [Chad Woolley](https://gitlab.com/cwoolley-gitlab) in their Slack messages. However, we kindly requested that [direct messages were avoided](https://handbook.gitlab.com/handbook/communication/#avoid-direct-messages) to promote open collaboration. The resolutions to these urgent issues were documented in the corresponding GitLab issues or MR discussions.\n4. **Zoom collaboration room:** The collaborative sessions held in the open Zoom room were not only beneficial for real-time discussions but also for fostering a collaborative environment. After each session, we summarized the key points and decisions made during the meeting in the associated GitLab issue or MR, making sure all important outcomes were captured and accessible to the team.\n\nThroughout the review process, we were committed to maintaining a seamless and well-documented workflow. By capturing all relevant information in GitLab issues, epics, and MR reviews, we ensured that the knowledge was persistently available, and future team members could easily understand the context and decisions made during the integration process.\n\n## Application security review\nDuring the application security review process, we focused on providing a secure and reliable Remote Development feature for our users. 
Here are the key resources and updates related to the application security review:\n\n1. **Main application security review issue:** The main application security review issue served as the central hub for tracking security-related considerations. You can find the defined process we followed [here](https://about.gitlab.com/handbook/security/security-engineering/application-security/appsec-reviews.html).\n2. **Application security review comment:** The application security review issue contained a comment indicating that the merge was not blocked unless there were severe issues that could impact production. \"In order to maintain a smooth merge process, we do not block MRs from being merged unless we identify severe issues that could prevent the feature from going into production, such as S1 or S2 level problems. If you are aware of any design flaws or concerns that might qualify as such issues, please bring them to our attention. We can review them together and address any questions or concerns that arise. Let's work collaboratively to find an approach that works for both parties. 👍\"\n3. **Engineering perspective:** For managing the application security review process from an engineering team perspective, we had a dedicated issue, which is kept confidential for security reasons.\n4. **Security and authentication matters:** All security and authentication concerns pertaining to the Beta release were documented within the [`Remote Development Beta -Auth` epic](https://gitlab.com/groups/gitlab-org/-/epics/10377). As of April 30, 2023, we are delighted to announce that **no known issues or obstacles were found that would impede the merge**. This represents a significant accomplishment, considering the intricate nature of this new feature.\n5. **Initial question raised:** During the application security review, one initial question was raised, and we promptly addressed it. 
You can track the issue and our response [here](https://gitlab.com/gitlab-org/gitlab/-/issues/409317).\n\n## Database review\nTo ensure the reliability and efficiency of the Remote Development feature, we sought guidance from the database reviewer. Although the team had not conducted a thorough self-review, we were fully prepared to address any blocking issues raised during the review process. Our references for the review were:\n\n- [Database review documentation](https://docs.gitlab.com/ee/development/database_review.html)\n- [Database reviewer guidelines](https://docs.gitlab.com/ee/development/database/database_reviewer_guidelines.html)\n\nAs an example, during the database migration review, a discussion arose between [Alper Akgun](https://gitlab.com/a_akgun) and Chad, regarding the efficient ordering of columns in the workspaces table. Alper initially suggested placing integer values at the beginning of the table based on relevant documentation.\n\nChad questioned the benefit of this suggestion, pointing out that the specific integer field, `max_hours_before_termination`, would still be padded with empty bytes even if moved to the front, due to its current position between two text fields.\n\nAlper proposed an alternative approach, emphasizing that organizing variable-sized fields (such as `text`, `varchar`, `arrays`, `json`, `jsonb`) at the end of the table could be sufficient for the workspaces table.\n\nUltimately, Chad took the initiative to implement the changes, moving all variable length fields to the end of the table, and documented the discussion as a comment to address review suggestions.\n\nWith this collaborative effort, the workspaces table was efficiently optimized, and the team gained valuable insights into database column ordering strategies.\n\n![DB](https://about.gitlab.com/images/blogimages/remote-development/DB.png){: .shadow.medium}\n\n## Ruby code review\nDuring the Ruby code review phase, we followed a meticulous approach by conducting a 
comprehensive self-review of every line of code. Our goal was to ensure the highest code quality and address any potential issues identified by the reviewers effectively.\n\nTo ensure clarity, it's important to clarify that the Ruby code review primarily focused on backend changes and server-side improvements. This included optimizing performance, enhancing functionalities, and refining the overall codebase to deliver a seamless user experience.\n\nFor the code review process, we referred to the [Code review documentation](https://docs.gitlab.com/ee/development/code_review.html), a valuable resource that guided us in maintaining industry best practices and adhering to the GitLab community's coding standards.\n\n### Example: Enhance error messages for unavailable features\nAs an example during the code review, we addressed an essential aspect of the workspace method, focusing on how we handle scenarios related to the `remote_development_feature_flag` and the `remote_development` licensed feature. The primary objective was to enhance the error messages presented to users when these features are not available.\n\nInitially, the code employed identical error messages for both cases, making it less clear to users whether the issue was due to a missing license or a disabled feature flag. This ambiguity could lead to confusion and hinder the user experience.\n\n#### The suggested improvement\nDuring the review, one of our maintainers, [Peter Leitzen](https://gitlab.com/splattael), raised an important question: \"Are we OK with having only a single error message for both cases (missing license and missing feature flag)?\"\n\nRecognizing the importance of clear communication, Chad proposed enhancing the error messages to provide distinct descriptions for each case. 
This improvement aimed to empower users by precisely conveying the reason behind the unavailability of certain features.\n\n#### The revised implementation\nFollowing Chad's suggestion, the code underwent the following changes:\n\n```ruby\nunless ::Feature.enabled?(:remote_development_feature_flag)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development_feature_flag' feature flag is disabled\"\nend\n\nunless License.feature_available?(:remote_development)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development' licensed feature is not available\"\nend\n\nraise_resource_not_available_error!('Feature is not available') unless current_user&.can?(:read_workspace)\n```\n\n#### The value of distinct error messages\nBy implementing distinct and descriptive error messages, we reinforce our commitment to user-centric development. Users interacting with our system will receive accurate feedback, helping them navigate potential roadblocks effectively. This enhancement not only improves the user experience but also streamlines troubleshooting and support processes.\n\nThis code review example highlights the significance of concise and informative error messages in delivering a top-notch user experience within the GitLab ecosystem. 
Our team's collaborative efforts ensure that users can confidently interact with our platform, knowing they'll receive clear and helpful error messages when needed.\n\n![BE1](https://about.gitlab.com/images/blogimages/remote-development/BE1.png){: .shadow.medium}\n\n### Example: Improving performance and addressing N+1 issues in WorkspaceType\nIn a recent code review, our team focused on optimizing the WorkspaceType and addressing potential N+1 query problems. The discussion involved two key contributors, [Laura Montemayor](https://gitlab.com/lauraX) and Chad, who worked together to enhance the performance of the codebase.\n\n#### Identifying the performance concerns\nDuring the review, Laura raised a performance concern regarding the possibility of N+1 queries in the WorkspaceType resolver. She suggested that preloading certain associations could be beneficial to avoid this common performance issue.\n\n#### A separate issue for N+1 control\nChad took prompt action and created a separate issue specifically aimed at resolving the N+1 query problems. The new issue, titled \"Address review feedback: Resolve N+1 issues,\" would address the concerns raised by Laura and implement the necessary preloading.\n\n#### Evaluating the potential N+1 impact\nChad provided insightful information about the low risk of real N+1 impact from two particular fields in the current implementation. He elaborated on how the queries for user and agent associations would largely be cache hits due to scoping and usage patterns. 
Chad diligently examined the cache hits happening in development, confirming the potential optimization.\n\nHere's a code snippet from the initial implementation:\n\n```ruby\n# Initial Implementation\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association\n  def user\n    object.user\n  end\n\n  # Resolver for the agent association\n  def agent\n    object.agent\n  end\nend\n```\n\n#### Treating performance as a priority\nBoth contributors acknowledged the significance of addressing the performance concern, with Laura emphasizing its importance. They agreed to prioritize the separate issue dedicated to resolving the N+1 queries and ensuring proper test coverage.\n\nHere's a code snippet from the revised implementation:\n\n```ruby\n# Revised Implementation with Preloading\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association with preloading\n  def user\n    ::Dataloader.for(::User).load(object.user_id)\n  end\n\n  # Resolver for the agent association with preloading\n  def agent\n    ::Dataloader.for(::Agent).load(object.agent_id)\n  end\nend\n```\n\n#### Considering future usage\nChad expressed excitement about the possibility of the new feature gaining significant usage. 
He humorously stated that encountering enough legitimate traffic on workspaces to trigger any performance impact would be a delightful problem to have, as it would indicate a growing user base.\n\n#### Collaboration and performance improvement\nThe code review exemplifies the collaborative and proactive approach of our team in optimizing the WorkspaceType. The team's dedication to addressing performance concerns ensures that our codebase remains performant and efficient, even as our user base grows.\n\n![BE2](https://about.gitlab.com/images/blogimages/remote-development/BE2.png){: .shadow.medium}\n\n## Frontend code review\nThe frontend code review process was managed by our resident `Create: IDE` frontend maintainers, [Paul Slaughter](https://gitlab.com/pslaughter) and [Enrique Alcántara](https://gitlab.com/ealcantara). Additionally, a significant portion of the new frontend UI code had already undergone separate reviews and was merged to master, contributing to the overall quality of the Remote Development feature.\n\n### Example: Collaborative code improvement for ApolloCache Mutators\nPaul started a thread on an old version of the diff related to `ee/spec/frontend/remote_development/pages/create_spec.js`. The code snippet in question involved creating a mock Apollo instance and writing queries to the cache.\n\n#### The initial implementation\nInitially, the code involved writing to the cache twice, which raised concerns among the maintainers, Paul and Enrique. Paul pointed out that the duplicate write was unintentional and wondered if the writeQuery was even necessary, given the removal of @client directives. However, he also acknowledged the need to test that the created workspace was added to the ApolloCache.\n\n```javascript\n// Initial Implementation\nconst buildMockApollo = () => {\n  // ... 
Other mock setup ...\n\n  // Initial writeQuery for userWorkspacesQuery\n  mockApollo.clients.defaultClient.cache.writeQuery({\n    query: userWorkspacesQuery,\n    data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n  });\n\n  // ... Other mock setup ...\n};\n```\n\n#### Identifying a potential issue\nEnrique agreed that the duplicate write was unintentional and probably introduced during a rebase. He explained that pre-populating the cache with a user workspaces query empty result was essential for the mutator to have a place to add the workspace. However, he encountered difficulties in making the workaround work effectively in unit tests.\n\n#### Resolving the issue\nPaul highlighted the significance of pre-populating the cache with the user workspaces query empty result. He suggested leaving a comment to explain the necessity of the initial writeQuery, as it would be implicitly coupled to future writeQuery operations.\n\n```javascript\n// Resolving the Issue - Leaving a Comment\n// Pre-populate the cache with user workspaces query empty result to provide a place\n// for the mutator to add the Workspace later. This is needed for both test and production environments.\nmockApollo.clients.defaultClient.cache.writeQuery({\n  query: userWorkspacesQuery,\n  data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n});\n```\n\nHowever, upon further investigation, Paul discovered that the writeQuery might not be needed, and the issue might be a symptom of an underlying problem. 
He decided to open a separate thread to address this concern and indicated that he would work on a separate MR to handle it.\n\n```javascript\n// Resolving the Issue - Opening a Separate Thread and MR\n// Open a separate thread to discuss potential underlying issues.\n// Plan to work on a separate MR to handle it.\n// Stay tuned for updates!\n```\n\n![FE](https://about.gitlab.com/images/blogimages/remote-development/FE.png){: .shadow.medium}\n\n## What we learned\nAs part of the Remote Development team, we faced the challenge of merging the Remote Development Rails monolith integration branch to meet our ambitious release goal. We adapted to last-minute pivots and focused on minimizing risks during the review process. The successful merge brought us one step closer to benefiting GitLab users worldwide. We acknowledged areas for improvement and remained committed to refining the feature's quality. Our journey reflects our dedication to delivering results, embracing change, and pushing boundaries in the DevOps community. The release of the Remote Development feature in GitLab 16.0 is a significant milestone for GitLab, and we continue to iterate and grow, providing innovative solutions for developers worldwide.\n\nAn outcome of this process was an ongoing conversation to propose a [simplified review process for greenfield features](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125117). Through this proposal, we aim to distill the lessons we learned during this experience and provide guidance to future teams facing similar challenges.\n\n## What is next for Remote Development?\nAfter the merge of the MR, several changes were implemented:\n- The first production tests were conducted to ensure the stability and functionality of the merged code.\n- Collaboration took place between the Dev Evangelism and Technical Marketing teams, focusing on [creating content](https://gitlab.com/groups/gitlab-com/marketing/developer-relations/-/epics/190). 
This collaboration aimed to troubleshoot any issues that arose during the merge.\n- Feedback from the community was taken into account, and changes were made to address the concerns raised. This feedback was incorporated into an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031) and influenced the overall roadmap and direction of the project.\n\nDo you want to [contribute to GitLab](/community/contribute/)? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"Hi.\"\n\n",[1035,1159,9,1925,1926,721],{"slug":3284,"featured":6,"template":699},"remote-development-beta","content:en-us:blog:remote-development-beta.yml","Remote Development Beta","en-us/blog/remote-development-beta.yml","en-us/blog/remote-development-beta",{"_path":3290,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3291,"content":3297,"config":3302,"_id":3304,"_type":13,"title":3305,"_source":15,"_file":3306,"_stem":3307,"_extension":18},"/en-us/blog/remote-enables-innovation",{"title":3292,"description":3293,"ogTitle":3292,"ogDescription":3293,"noIndex":6,"ogImage":3294,"ogUrl":3295,"ogSiteName":685,"ogType":686,"canonicalUrls":3295,"schema":3296},"How remote work enables rapid innovation at GitLab","At GitLab, remote isn’t a business operations risk, it’s a competitive advantage.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678666/Blog/Hero%20Images/paper-lanterns.jpg","https://about.gitlab.com/blog/remote-enables-innovation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How remote work enables rapid innovation at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Victor Wu\"}],\n        \"datePublished\": \"2019-02-27\",\n      }",{"title":3292,"description":3293,"authors":3298,"heroImage":3294,"date":3299,"body":3300,"category":811,"tags":3301},[739],"2019-02-27","\nI’m a Product 
Manager here at GitLab, primarily contributing to the [Plan stage](/direction/plan/)\nof the [DevOps lifecycle](/stages-devops-lifecycle/). I joined in November 2016 and I’ve witnessed incredible\ngrowth in GitLab the product as well as GitLab the team. Many\nnew hires have asked me during [coffee chats](/company/culture/all-remote/#coffee-chats)\nabout GitLab culture and remote work in particular, since we're an [all-remote](/company/culture/all-remote/)\ncompany. My view has evolved over this time and I wanted to share specifically why I think\nremote is _not_ a challenge to overcome, but actually a _competitive advantage_, at least for GitLab.\n\n## A remote journey\n\nWhen I joined GitLab, I thought remote was a challenge to overcome or at least\nto manage. It was a risk to be mitigated. For example, I really wanted daily standup\nmeetings with the engineering team I was working with. Silicon Valley-style tech\ncompanies and product management books tell us that frequent, synchronous, face-to-face\ncommunication is necessary for building successful products efficiently and to win\nin the marketplace. To my dismay at the time, we never had in-sync standups (and\nmy team today still doesn’t have them). But curiously, we nonetheless had immense\ncollaboration and continued to ship product at a high velocity. Something really\nweird and unexpected was going on.\n\nLater on, as I started getting comfortable [doing product the GitLab way](/handbook/product/),\nI started to think that remote wasn’t really a risk, but that there were just a\nfew negatives, and that the overall effect was net positive. See the [advantages and disadvantages of remote](/company/culture/all-remote/#advantages-for-employees).\n\nToday, I realize that even a positive-negative accounting of remote is insufficient\nto articulate what remote means at GitLab. 
I think that remote\n(along with a few other key crucial GitLab ingredients) gives us a differentiated\nand competitive advantage, in particular allowing us to innovate at a rapid pace\nthat is truly unique. Here's why:\n\n## Interdependent ingredients\n\nThere are several crucial and interdependent GitLab ingredients that make remote\ntruly work in our favor:\n\n### Async communication\n\nRemote implies geographic diversity (since we hire all over the world),\nand because most folks work during the day, that further implies time zone diversity.\nConsequently, we prefer **[Async communication (primarily with text)](https://handbook.gitlab.com/handbook/communication/)** as we scale our organization in\nspace-time. Async demands everything be written down and that it be clear and concise.\nYou can’t afford a prolonged back-and-forth conversation because every round-trip\ntransaction is possibly 24 hours in the worst case. In particular, we prefer text\nbecause the internet and modern apps (for example [GitLab issues](https://docs.gitlab.com/ee/user/project/issues/)) have allowed text\nto be easily organizable, searchable, and even hyperlinked. Text is easy to parse\nand thus consume. It is a highly efficient form of communication, especially for\ntransactional collaboration.\n\n### Transparency\n\nThe async communication we reference is also digital, making it infinitely\nscalable. Unlike the printed page in a physical office, anybody should\nbe able to access a digital message. So, rather than re-erecting the walls and silos\nthat plague traditional organizations and inevitably block collaboration, we\nmake communications and work **[transparent](https://handbook.gitlab.com/handbook/values/#transparency)** by default.\nAdding a layer of permissions is necessary sometimes, and in those cases it becomes an overhead cost to manage\nand use (for example fixing a security bug.) 
The transmitter of communications\nneeds to figure out who should receive, and set the appropriate permissions. The\nreceiver themself needs additional work to access the content. It’s more pain. It\nadds up. So we try to avoid it when we can.\n\n>Because you know everything you write down will potentially be viewed by anyone – inside or even outside the company – simply telling the truth is the optimal and most efficient strategy\n\nTransparency also makes it really easy to tell the truth, and disincentivizes dishonesty.\nTelling the truth is simply the right thing to do, but it’s also a great strategy\nto grow a long-term sustainable business. In particular, because you know everything\nyou write down will potentially be viewed by anyone in the company or even outside\nthe company, simply telling the truth is the optimal and most efficient strategy\nand you will thus adopt it with little friction. You don’t have to make up slightly\ndifferent versions for different stakeholders. You don’t have to keep track of all\nthese versions. And you only need a single artifact to document that one source\nof truth, which will never be out of sync, because there’s only one! For\nus, that single source of truth is typically the description in an issue.\n\n### Everyone can contribute\n\nWith a single source of truth that is consumable by anybody, it allows **[everyone to contribute](/company/mission/#mission)**.\nEveryone has information parity. And so anyone is welcome to contribute. In fact,\nremember I mentioned above that the transmitter of information typically has an intended receiver\nin mind? In this case, oftentimes somebody who they didn’t expect can even participate\nand add value. This isn’t possible if there’s no transparency because artificial\nbarriers pre-close the opportunities of potential collaboration. Also, everyone\ncan contribute means future folks can participate too. 
You may start a conversation\non an idea that turns out to be suboptimal in the current circumstances. But it\nmight end up being just a timing issue. And so posterity might be able to recover\nthe old idea and ship a feature later on, taking advantage of all the discussions\nthat were had and made available publicly.\n\nEveryone can contribute also means that the diversity of ideas skyrockets. And so\nat GitLab, people often cross departments and offer some of the best ideas to solve\nbig challenging problems. But we still have [directly responsible individuals](/handbook/people-group/directly-responsible-individuals/)\nto make decisions in order to avoid analysis paralysis.\n\n### Iteration\n\nFinally, how can all this communication and collaboration truly function if the\nmechanisms are so transactional, distributed, and unstructured? It works because\nit forces us to be **[iterative](https://handbook.gitlab.com/handbook/values/#iteration)**. Most people think they understand iteration (myself\nincluded) before joining GitLab. But I’ve discovered over and over again that new\nfolks are surprised that this concept is taken to an extreme. Product\nand code are shipped in the absolute smallest piece possible in an effort to get\nfeedback and momentum. Implementing programs and processes at GitLab means breaking\noff the smallest chunk and then putting it into action right away. We still make\nbig, bold plans and big bets on the future. But we don’t obsess over extended analysis.\nInstead we find the smallest thing that we can do now and we do it. We believe that\nwaiting until tomorrow is an opportunity cost. Doing something small today is low\nrisk and results in immediate feedback. We have a [bias for action](https://handbook.gitlab.com/handbook/values/#bias-for-action).\n\n>We believe that waiting until tomorrow is an opportunity cost. 
Doing something small today is low\nrisk and results in immediate feedback.\n\nAnd so if all our communication and collaboration is focused on small iterations,\nthe scope of a typical problem is small and manageable. And it turns out (unsurprisingly)\nmore people are willing to participate in a small problem if it literally takes\nthem a few moments to voluntarily glance at an issue description, instead of being\nforced to attend a two-hour slide presentation explaining a big problem.\nAnd since the problem is made transparent by default, the pool of contributors is\nvery high, as mentioned earlier. Personally, I am actively involved\nin at least 20 to 30 parallel problem conversations on a daily basis. It is impossible\nfor anyone to achieve that level of productivity if all of those conversations required\ndedicated, ongoing, synchronous meetings. This results in an incredible rate of collaboration\nfor myself. Multiply that by all team members at GitLab, and then also all GitLab\ncommunity members further still, and you can see now why GitLab’s pace of innovation\nis ridiculously high.\n\nRemote is not a challenge for GitLab to overcome. It’s a clear business advantage.\n\n## Ending caveat\n\nThe picture I’ve painted here is one of constant messaging and wild ideas. And\nthat’s intentional because it’s true. New folks joining GitLab often are inundated\nby the number of discussions they find themselves involved in after several weeks\nin. This is indeed an ongoing risk for GitLab especially as we scale and the level\nof ideation grows exponentially in relation to headcount (since communication links\ngrow exponentially as nodes in a people network grow). I’ve observed that GitLab\nteam members usually figure out a way to cope soon enough, and typically become\nmore selective in their communications over time. 
I think this is a good general\nstrategy overall, because good ideas tend to get more attention, and we essentially\nrely on the wisdom of the crowds to surface them. Of course we still have well-defined\nroles and responsibilities that serve as guardrails too, that allow subject matter\nexperts and directly responsible individuals to strategically guide our innovation\nin the right general direction.\n\nHow are you making remote work work? Let us know in the comments or tweet us [@gitlab](https://twitter.com/gitlab).\n\n[Cover image](https://unsplash.com/photos/TaXPogWdzR0) by [amseaman](https://unsplash.com/@amseaman) on Unsplash\n{: .note}\n",[696,790,1241,3034,9],{"slug":3303,"featured":6,"template":699},"remote-enables-innovation","content:en-us:blog:remote-enables-innovation.yml","Remote Enables Innovation","en-us/blog/remote-enables-innovation.yml","en-us/blog/remote-enables-innovation",{"_path":3309,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3310,"content":3316,"config":3322,"_id":3324,"_type":13,"title":3325,"_source":15,"_file":3326,"_stem":3327,"_extension":18},"/en-us/blog/remote-work-done-right",{"title":3311,"description":3312,"ogTitle":3311,"ogDescription":3312,"noIndex":6,"ogImage":3313,"ogUrl":3314,"ogSiteName":685,"ogType":686,"canonicalUrls":3314,"schema":3315},"Remote work, done right","Guest author Nolan Myers hated conference calls. 
Here's how we changed his mind.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679812/Blog/Hero%20Images/remote-work-done-right.jpg","https://about.gitlab.com/blog/remote-work-done-right","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remote work, done right\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nolan Myers\"}],\n        \"datePublished\": \"2018-03-16\",\n      }",{"title":3311,"description":3312,"authors":3317,"heroImage":3313,"date":3319,"body":3320,"category":811,"tags":3321},[3318],"Nolan Myers","2018-03-16","\n\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nI’ve been on many terrible conference calls. The gentle voice telling me to enter my nine-digit pin, followed by the pound sign, feels like disappointment before the call even begins. That’s why I was so surprised to hear that GitLab – a company of over 200 people – runs without an office. How could anything get done when every meeting was remote?\n\n\u003C!-- more -->\n\nSeeing is believing, so I jumped at the opportunity to watch firsthand. What I learned convinced me that remote meetings can be just as good as in person, and maybe even better. Here’s what impressed me:\n\n### Video conference for all\n\nEveryone joined a Zoom call, each from their own computer. Most everyone had their cameras on, which gave enough visual cues to see their mood; sometimes even an understanding of who they are, like seeing a pool table or disassembled motorcycle behind them. The video format helped enforce some good meeting practices. Only one speaker at a time; a singular focus of attention, either a person or a shared screen. 
Meetings started on time, never having to wait for a previous group to clear a conference room. Having everyone join independently also worked much better than having a few people in a room and a few remotes, which inevitably creates a power-center in the room.\n\n>The video format helped enforce some good meeting practices: only one speaker at a time; a singular focus of attention\n\n### Create a live agenda in a shared document\n\nEach meeting started with an agenda in a shared Google Doc. They coupled this with a “write before you speak” etiquette. Anyone was welcome to speak, and added a brief summary of their question or comment into the shared doc before chiming in. This encouraged the speaker to be deliberate about their point, think about where in the flow it made most sense, and to know they’d get the floor when appropriate. It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.” Even better, they were left with detailed meeting notes for posterity.\n\n>It was kind of a marvel to see bullets and sub-bullets evolve during the meeting. A task owner typed “TODO: follow up” right as they said “I got it.”\n\n### Embrace multitasking\n\nHow often have you heard that you should give a meeting your undivided attention? And how often have you actually believed it? GitLab embraces multitasking. Having everyone together ensures the right people are there for important conversations. But inevitably a packed meeting agenda will have sections more and less relevant to a variety of participants. Unlike in a room, a video call where someone tunes out for a bit doesn’t hamper the effectiveness of those focused on a conversation. The shared agenda let everyone know when they were needed, and each topic had the right people ready to contribute.\n\n### Caveats and considerations\n\nThis process felt like a miniature miracle to watch, but does need the right tools. 
GitLab relied on Zoom and it worked well. One external call used WebEx, and its longer latency led people accidentally to talk over one another. Google Docs was a must for the shared agenda. Everyone had set up a reasonable workspace with fast internet and a camera.\n\nI’d also add that I saw this work well for both update- and decision-oriented meetings. Would this approach support technical brainstorming meetings too? Sometimes drawing on a whiteboard works much better than typing, especially if you have a diagram. Zoom does have a whiteboard feature; perhaps with a Stylus you could do this as well as in person. I’m curious to see it in practice.\n\nWhen I first heard of GitLab’s remote-only hiring, I immediately saw the benefits of hiring in lower-rent locations and not paying for office space. I assumed that it cost some productivity through effective collaboration. Now I see video calls done right can beat all but the best traditional conference room meetings.\n\n## About the guest author\n\nNolan Myers advises startups on organizational development and customer success, leveraging his executive experience in building high-performing products and teams. He also has passions for classical music, fine cuisine, and urban design. 
Learn more on his [LinkedIn](https://linkedin.com/in/nolanmyers).\n\nPhoto by [Christin Hume](https://unsplash.com/photos/slbqShqAhEo) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1241,790,9,696,721],{"slug":3323,"featured":6,"template":699},"remote-work-done-right","content:en-us:blog:remote-work-done-right.yml","Remote Work Done Right","en-us/blog/remote-work-done-right.yml","en-us/blog/remote-work-done-right",{"_path":3329,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3330,"content":3336,"config":3342,"_id":3344,"_type":13,"title":3345,"_source":15,"_file":3346,"_stem":3347,"_extension":18},"/en-us/blog/reviewer-roulette-one-year-on",{"title":3331,"description":3332,"ogTitle":3331,"ogDescription":3332,"noIndex":6,"ogImage":3333,"ogUrl":3334,"ogSiteName":685,"ogType":686,"canonicalUrls":3334,"schema":3335},"Reviewer Roulette: (Just about) one year on","Learn how Reviewer Roulette has evolved at GitLab over the last year.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672195/Blog/Hero%20Images/play-reviewer-roulette.jpg","https://about.gitlab.com/blog/reviewer-roulette-one-year-on","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Reviewer Roulette: (Just about) one year on\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nick Thomas\"}],\n        \"datePublished\": \"2019-10-23\",\n      }",{"title":3331,"description":3332,"authors":3337,"heroImage":3333,"date":3339,"body":3340,"category":832,"tags":3341},[3338],"Nick Thomas","2019-10-23","\nJust over a year ago, [Dennis Tang](/company/team/#dennis)\nintroduced us to [Reviewer Roulette](/blog/play-reviewer-roulette/).\nThis was a shiny new tool designed to help us to find reviewers for our code.\nAt the time, our engineering department had around 150 people in it. 
At GitLab,\n[all our engineers are reviewers](/handbook/engineering/workflow/code-review/#reviewer),\nbut reviews were being unevenly distributed across them.\n\nA year on, and with more than 380 people in engineering available to review,\nwe're still using a form of Reviewer Roulette – but its implementation, and how\nwe interact with it, has changed significantly. So, what's changed, and what's\nstayed the same?\n\n## The good\n\nFirst off, roulette works really well. Code reviews can be time-consuming, and\nthey're a major part of quality control at GitLab, so it's crucial that we\ndistribute the load – research shows that [review quality nosedives](https://smartbear.com/learn/code-review/best-practices-for-peer-code-review/)\nif you spend too much time doing it. It's even more\nimportant for our maintainers. We try to maintain a ratio of engineers to maintainers of around\n4:1, but if half of the reviews go to a quarter of the maintainers, some will\nexperience it as 6:1, while others will experience it as 2:1.\n\nAlso, people could become familiar with certain reviewers and maintainers and\nhabitually assign their work to the same people. This means that people who had\nbeen maintainers for longer tended to get more reviews. Without the\nrandomization effect of Reviewer Roulette, this led to the creation of knowledge\nsilos, where knowledge about a particular subject would be concentrated in a few\nindividuals, rather than being spread across the organization.\n\nRoulette solved this for us with almost no cognitive load, and could scale\neffortlessly as our engineering team expands significantly. Sometimes, I first\nlearned someone new had joined the company through a review suggestion. The\nnumber and type of reviews a merge request needed was also increasing – I might\nneed to find a reviewer and maintainer for frontend, backend, QA, database,\ndocumentation, and UX concerns before merging. 
It's a lot to keep track of\nmanually!\n\n## The bad\n\nDespite the advantages of Reviewer Roulette, I used it inconsistently after a\nfew months, and never actually contributed any improvements to the code. The\nintegration with Slack didn't fit my workflow very well because a chat channel\nis the last place I want to be when working on code! I like to treat Slack as\nthe [informal, asynchronous](https://handbook.gitlab.com/handbook/communication/#slack) communication\nchannel it is designed to be, but it is too easy to be sidetracked by ongoing\nconversations when popping in to get a reviewer recommendation.\n\nThen, we began running into deployment problems, and sometimes Reviewer Roulette\njust wasn't available at all. It only took a few failed attempts before I fell\nout of the habit of trying to use it, and we never did get around to making the\ndeployment work with Auto DevOps.\n\nIt turns out that I wasn't the only one having trouble with this iteration of Reviewer Roulette – we found\nthat backend reviews were [very unevenly distributed](https://gitlab.com/gitlab-org/gitlab-foss/issues/53119#note_111796691). Reviewer Roulette wasn't being used widely enough across GitLab for us to experience\nall the benefits, and as we geared up to add many more maintainers, fixing\nthis tool became very important.\n\n## The fix\n\nIn the interim, staff backend engineer on Delivery, [Yorick Peterse](/company/team/#yorickpeterse), introduced\n[Danger bot](https://github.com/danger/danger) into GitLab's CI pipeline and\nused it to enforce a fine set of coding standards that we couldn't quite express\nwith Rubocop.\n\nThe new bot would leave polite messages on our MRs, asking us to write\n[better commit messages](https://docs.gitlab.com/ee/development/contributing/merge_request_workflow.html#commit-messages-guidelines),\nor to seek database review if we'd changed any files in `db/`. 
That last part got me\nthinking: Why couldn't the Danger bot pick a potential database reviewer for us at the same\ntime? What was stopping it from detecting backend, frontend, or documentation\nchanges, and using Reviewer Roulette to choose reviewers and maintainers right there in\nthe merge request?\n\n[Very little, it turned out](https://gitlab.com/gitlab-org/gitlab/merge_requests/13506#note_175449376):\n\n![Reviewer Roulette in Action](https://about.gitlab.com/images/blogimages/roulette-review.jpg)\n\nBy making Reviewer Roulette happen automatically in the merge request itself, we\nremoved all the barriers that were preventing us from using the tool. I no longer had to be\non Slack to find a reviewer, instead the list was right there in the merge request as\nI went to change the assignee. Danger was guaranteed to run on every pipeline –\nthere were no deployments or environments to worry about, and if it broke,\nfixing it was automatically [high priority](/handbook/engineering/workflow/#broken-master).\n\nContributing changes also became much easier – the code was right there in the\nGitLab repository, and changes took effect immediately (again, no deployments!).\n\n## What's next?\n\nThe ChatOps version of Reviewer Roulette needed access to GitLab's Slack\nworkspace to use and so it wasn't available to most of our community contributors\nbeyond the [core team](/handbook/marketing/developer-relations/core-team/). Moving Reviewer Roulette to Danger doesn't really solve this\nproblem – it doesn't work well on forks of the `gitlab-org/gitlab` project so\ncommunity contributors don't benefit. 
This problem is something I'd really\nlike to fix in the future, not least because I work on a fork of GitLab\nday-to-day as well.\n\nDanger is a good tool but it does have [some limitations](https://docs.gitlab.com/ee/development/dangerbot.html) –\nin particular, [`danger local`](https://danger.systems/guides/troubleshooting.html#i-want-to-work-locally-on-my-dangerfile)\ndoesn't work for GitLab. This slows down development, since you have to commit\nand push changes to your merge request before you can see the effects.\n\nAnother big problem is that this most recent iteration of Reviewer Roulette only\nworks for the `gitlab` project. None of our satellite projects - `gitaly`,\n`gitlab-workhorse`, `gitlab-pages`, `gitlab-runner`, etc. – can use this\nversion of Reviewer Roulette. Similarly, [users of GitLab haven't\nbenefited from the work we've been doing on Roulette](https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/24938#note_141874188).\nIdeally, we would have built this as a feature within GitLab itself, so everyone\ncould benefit from the tool.\n\nBy building Reviewer Roulette in Danger we've been able to prototype and rapidly iterate\nto a solution that is working very well for the `gitlab` project. The next steps\nare to turn Reviewer Roulette [into a feature](https://gitlab.com/groups/gitlab-org/-/epics/1823) that all users of GitLab can benefit from, perhaps by leveraging the [CODEOWNERS file](https://gitlab.com/gitlab-org/gitlab/issues/12137).\n\nDo you have any ideas on how we can better integrate Reviewer Roulette into GitLab? 
Let us know by commenting [in the epic](https://gitlab.com/groups/gitlab-org/-/epics/1823)\nor by opening a new issue!\n\n[Cover photo](https://unsplash.com/photos/w6OniVDCfn0) by Krissia Cruz on [Unsplash](https://unsplash.com/search/photos/roulette).\n{: .note}\n",[1385,9,722],{"slug":3343,"featured":6,"template":699},"reviewer-roulette-one-year-on","content:en-us:blog:reviewer-roulette-one-year-on.yml","Reviewer Roulette One Year On","en-us/blog/reviewer-roulette-one-year-on.yml","en-us/blog/reviewer-roulette-one-year-on",{"_path":3349,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3350,"content":3355,"config":3361,"_id":3363,"_type":13,"title":3364,"_source":15,"_file":3365,"_stem":3366,"_extension":18},"/en-us/blog/scaling-down-how-we-prototyped-an-image-scaler-at-gitlab",{"title":3351,"description":3352,"ogTitle":3351,"ogDescription":3352,"noIndex":6,"ogImage":1793,"ogUrl":3353,"ogSiteName":685,"ogType":686,"canonicalUrls":3353,"schema":3354},"Scaling down: How we shrank image transfers by 93%","Our approach to delivering an image scaling solution to speed up GitLab site\nrendering","https://about.gitlab.com/blog/scaling-down-how-we-prototyped-an-image-scaler-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scaling down: How we shrank image transfers by 93%\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthias Käppler\"}],\n        \"datePublished\": \"2020-11-02\",\n      }",{"title":3351,"description":3352,"authors":3356,"heroImage":1793,"date":3358,"body":3359,"category":1239,"tags":3360},[3357],"Matthias Käppler","2020-11-02","{::options parse_block_html=\"true\" /}\n\n\n\n\nThe\n[Memory](https://about.gitlab.com/handbook/engineering/development/enablement/data_stores/application_performance/)\nteam recently shipped an improvement to our image delivery functions\n\nthat drastically reduces the amount of data we serve to clients. 
Learn here\nhow we went from knowing nothing about\n\n[Golang](https://golang.org/) and image scaling to a working on-the-fly\nimage scaling solution built into\n\n[Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse).\n\n\n## Introduction\n\n\nImages are an integral part of GitLab. Whether it is user and project\navatars, or images embedded in issues\n\nand comments, you will rarely load a GitLab page that does not include\nimages in some way shape or form.\n\nWhat you may not be aware of is that despite most of these images appearing\nfairly small when presented\n\non the site, until recently we were always serving them in their original\nsize.\n\nThis meant that if you would visit a merge request, then all user avatars\nthat appeared merely as thumbnails\n\nin sidebars or comments would be delivered by the GitLab application in the\nsame size they were uploaded in,\n\nleaving it to the browser rendering engine to scale them down as necessary.\nThis meant serving\n\nmegabytes of image data in a single page load, just so the frontend would\nthrow most of it away!\n\n\nWhile this approach was simple and served us well for a while, it had\nseveral major drawbacks:\n\n\n- **Perceived latency suffers.** The perceived latency is the time that\npasses between a user\n  requesting content, and that content actually becoming visible or being ready to engage with.\n  If the browser has to download several megabytes of image data, and then has to furthermore\n  scale down those images to fit the cells they are rendered into, the user experience unnecessarily suffers.\n- **Egress traffic cost.** On gitlab.com, we store all images in object\nstorage, specifically GCS\n  (Google Cloud Storage). This means that our Rails app first needs to resolve an image entity to\n  a GCS bucket URL where the binary data resides, and have the client\n  download the image through that endpoint. 
This means that for every image served, we cause\n  traffic from GCS to the user that we have to pay for, and the more data we serve, the higher the cost.\n\nWe therefore took on the challenge to both improve rendering performance and\nreduce traffic costs\n\nby implementing [an image scaler that would downscale\nimages](https://gitlab.com/groups/gitlab-org/-/epics/3822)\n\nto a requested size before delivering them to the client.\n\n\n### Phase 1: Understanding the problem\n\n\nThe first problem is always: understand the problem! What is the status quo\nexactly? How does it work?\n\nWhat is broken about it? What should we focus on?\n\n\nWe had a pretty good idea of the severity of the problem, since we regularly\nrun performance tests\n\nthrough [sitespeed.io](https://www.sitespeed.io) that highlight performance\nproblems on our site.\n\nIt had identified image sizes as one of the most severe issues:\n\n\n![sitespeed performance\ntest](https://gitlab.com/groups/gitlab-org/-/uploads/a06d8bfde802995c577afca843be7e96/Bildschirmfoto_2020-07-15_um_11.45.44.png)\n\n\nTo better inform a possible solution, an essential step was to [collect\nenough data](https://gitlab.com/gitlab-org/gitlab/-/issues/227387)\n\nto help identify the areas we should focus on. Here are some of the\nhighlights:\n\n\n- **Most images requested are avatars.** We looked at the distribution of\nrequests for certain types of images.\n  We found that about 70% of them were for avatars, while the remaining 30% accounted for embedded images.\n  This suggested that any solution would have the biggest reach if we focused on avatars first. Within the\n  avatar cohort we found that about 62% are user avatars, 22% are project avatars, and 16% are group avatars,\n  which isn't surprising.\n- **Most avatars requested are PNGs or JPEGs.** We also looked at the\ndistribution of image formats. 
This is partially\n  affected by our upload pipeline and how images are processed (for instance, we always crop user avatars and store them as PNGs)\n  but we were still surprised to see that both formats made up 99% of our avatars (PNGs 76%, JPEGs 23%). Not much\n  love for GIFs here!\n- **We serve 6GB of avatars in a typical hour.** Looking at a representative\nwindow of 1 hour of GitLab traffic, we saw\n  almost 6GB of data move over the wire, or 144GB a day. Based on experiments with downscaling a representative user avatar,\n  we estimated that we could reduce this to a mere 13GB a day on average, saving 130GB of bandwidth each day!\n\nThis was proof enough for us that there were significant gains to be made\nhere. Our first intuition was: could this\n\nbe done by a CDN? Some modern CDNs like Cloudflare [already support image\nresizing](https://support.cloudflare.com/hc/en-us/articles/360028146432-Understanding-Cloudflare-Image-Resizing)\n\nin some of their plans. However, we had two major concerns about this:\n\n\n1. **Supporting our self-managed customers.** While gitlab.com is the\nlargest GitLab deployment we know of, we have hundreds of thousands\n  of customers who run their own GitLab installation. If we were to only resize images that pass through a CDN in front of gitlab.com,\n  none of those customers would benefit from it.\n1. 
**Pricing woes.** While there are request budgets based on your CDN plan,\nwe were worried about the operational cost this would\n  add for us and how to reliably predict it.\n\nWe therefore decided to look for a solution that would work for all GitLab\nusers, and that would be more under\n\nour own control, which led us to phase 2: experimentation!\n\n\n### Phase 2: Experiments, experiments, experiments!\n\n\nA frequent challenge for [our team\n(Memory)](https://about.gitlab.com/handbook/engineering/development/enablement/data_stores/application_performance/)\n\nis that we need to venture into parts of GitLab's code base\n\nthat we are unfamiliar with, be it with the technology, the product area, or\nboth. This was true in this\n\ncase as well. While some of us had some exposure to image scaling services,\nnone of us had ever built or\n\nintegrated one.\n\n\nOur main goal in phase 2 was therefore to identify what the possible\napproaches to image scaling were,\n\nexplore them by researching existing solutions or even building\nproof-of-concepts (POCs), and grade\n\nthem based on our findings. The questions we asked ourselves along the way\nwere:\n\n\n- **When should we scale?** Upfront during upload or on-the-fly when an\nimage is requested?\n\n- **Who does the work?** Will it be a dedicated service? Can it happen\nasynchronously in Sidekiq?\n\n- **How complex is it?** Whether it's an existing service we integrate, or\nsomething we build ourselves,\n  does implementation or integration complexity justify its relatively simple function?\n- **How fast is it?** We shouldn't forget that we set out to solve a\nperformance issue. Are we sure that\n  we are not making the server slower by the same amount of time we save in the client?\n\nWith this in mind, we identified [multiple architectural\napproaches](https://gitlab.com/groups/gitlab-org/-/epics/3979) to consider,\n\neach with their own pros and cons. 
These issues also doubled as a form of\n[architectural decision\nlog](https://github.com/joelparkerhenderson/architecture_decision_record#what-is-an-architecture-decision-record)\n\nso that decisions for or against an approach are recorded.\n\n\nThe major approaches we considered are outlined next.\n\n\n#### Static vs. dynamic scaling\n\n\nThere are two basic ways in which an image scaler can operate: it can either\ncreate thumbnails of\n\nan existing image ahead of time, e.g. during the original upload as a\nbackground job. Or it can\n\nperform that work on demand, every time an image is requested. To make a\nlong story short: while\n\nit took a lot of back and forth, and even though we had [a working\nPOC](https://gitlab.com/gitlab-org/gitlab/-/issues/232616),\n\nwe eventually discarded the idea of scaling statically, at\n\nleast for avatars. Even though\n[CarrierWave](https://github.com/carrierwaveuploader/carrierwave) (the Ruby\nuploader\n\nwe employ) has an integration\n\nwith MiniMagick and is able to perform that kind of work, it suffered from\nseveral issues:\n\n\n1. **Maintenance heavy.** Since image sizes may change over time, a strategy\nis needed to backfill sizes\n  that haven't been computed yet. This raised questions especially for self-managed customers where\n  we do not control the GitLab installation.\n1. **Statefulness.** Since thumbnails are created alongside the original\nimage, it was unclear how to perform\n  cleanups should they become necessary, since CarrierWave does not store these as separate database\n  entities that we could easily query.\n1. **Complexity.** The POC we created turned out to be more complex than\nanticipated and felt like we\n  were shoehorning this feature onto existing code. This was exacerbated by the fact that at the time\n  we were running a very old version of CarrierWave that was already a maintenance liability, and upgrading it\n  would have added scope creep and delays to an already complex issue.\n1. 
**Flexibility.** The actual scaler implementation in CarrierWave is\nburied three layers down the Ruby dependency stack,\n  and it was difficult to replace the actual scaler binary (which would become a\n  problem when trying to secure this solution as we will see in a moment.)\n\nFor these reasons we decided to scale images on-the-fly instead.\n\n\n### Dynamic scaling: Workhorse vs. dedicated proxy\n\n\nWhen scaling images on-the-fly the question becomes: where? Early on there\nwas a suggestion to use\n\n[imgproxy](https://github.com/imgproxy/imgproxy), a \"fast and secure\nstandalone server for resizing and converting remote images\".\n\nThis sounded tempting, since it is a \"batteries included\" offering, it's\nfree to use, and it is a great\n\nway to isolate the task of image scaling from other production work loads,\nwhich has benefits around\n\nsecurity and fault isolation.\n\n\nThe main problem with imgproxy was exactly that, however: a standalone\nserver.\n\n[Introducing a new service to\nGitLab](https://docs.gitlab.com/ee/development/adding_service_component.html#adding-a-new-service-component-to-gitlab)\n\nis a complex task, since we strive to appear as a [single\napplication](https://about.gitlab.com/handbook/product/single-application/)\nto the end user,\n\nand documenting, packaging, configuring, running and monitoring a new\nservice just for rescaling images seemed excessive.\n\nIt therefore wasn't in line with our prerogative of focusing on the\n\n[minimum viable\nchange](https://handbook.gitlab.com/handbook/product/product-principles/#the-minimal-viable-change-mvc).\n\nMoreover, imgproxy had significant overlap with existing architectural\ncomponents at GitLab, since we already\n\nrun a reverse proxy:\n[Workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse).\n\n\nWe therefore decided that the fastest way to deliver an MVC was to build out\nthis functionality in Workhorse\n\nitself. 
Fortunately we found that we already had an established pattern for\ndealing with special, performance\n\nsensitive workloads, which meant that we could\n\nlearn from existing solutions for similar problems (such as image delivery\nfrom remote storage), and we could\n\nlean on its existing integration with the Rails application for request\nauthentication and running business\n\nlogic such as validating user inputs, which helped us tremendously to focus\non the actual problem: scaling images.\n\n\nThere was a final decision to make, however: scaling images is a very\ndifferent kind of workload from\n\nserving ordinary requests, so an open question was how to integrate a scaler\ninto Workhorse in a way\n\nthat would not have knock-on effects on other tasks Workhorse processes need\nto execute.\n\nThe two competing approaches discussed were to either shell out to an\nexecutable that performs the scaling,\n\nor run a [sidecar\nprocess](https://docs.microsoft.com/en-us/azure/architecture/patterns/sidecar#:~:text=Sidecars%20are%20supporting%20processes%20or,fate%20of%20its%20parent%20application.)\n\nthat would take over image scaling work loads from the main Workhorse\nprocess.\n\n\n### Dynamic scaling: Sidecar vs. fork-on-request\n\n\nThe main benefit of a sidecar process is that it has its own life-cycle and\nmemory space, so it can be tuned\n\nseparately from the main serving process, which improves fault isolation.\nMoreover, you only pay the\n\ncost for starting the process once. However, it also comes with\n\nadditional overhead: if the sidecar dies, something has to restart it, so we\nwould have to look at\n\nprocess supervisors such as `runit` to do this for us, which again comes\nwith a significant amount\n\nof configuration overhead. 
Since at this point we weren't even sure how\ncostly it would be to serve\n\nimage scaling requests, we let our MVC principle guide us and decided to\nfirst explore the simpler\n\nfork-on-request approach, which meant shelling out to a dedicated scaler\nbinary on each image scaling\n\nrequest, and only consider a sidecar as a possible future iteration.\n\n\nForking on request was [explored as a\nPOC](https://gitlab.com/gitlab-org/gitlab/-/issues/230519)\n\nfirst, and was quickly made production ready and deployed\n\nbehind a feature toggle. We initially ended up settling on\n[GraphicsMagick](http://www.graphicsmagick.org/)\n\nand its `gm` binary to perform the actual image scaling for us, both because\nit is a battle tested library, but also\n\nbecause there was precedent at GitLab to use it for existing features, which\nallowed us to ship\n\na solution even faster.\n\n\nThe overall request flow finally looked as follows:\n\n\n```mermaid\n\nsequenceDiagram\n    Client->>+Workhorse: GET /image?width=64\n    Workhorse->>+Rails: forward request\n    Rails->>+Rails: validate request\n    Rails->>+Rails: resolve image location\n    Rails-->>-Workhorse: Gitlab-Workhorse-Send-Data: send-scaled-image\n    Workhorse->>+Workhorse: invoke image scaler\n    Workhorse-->>-Client: 200 OK\n```\n\n\nThe \"secret sauce\" here is the `Gitlab-Workhorse-Send-Data` header\nsynthesized by Rails. 
It carries\n\nall necessary parameters for Workhorse to act on the image, so that we can\nmaintain a clean separation\n\nbetween application logic (Rails) and serving logic (Workhorse).\n\nWe were fairly happy with this solution in terms of simplicity and ease of\nmaintenance, but we\n\nstill had to verify whether it met our expectations for performance and\nsecurity.\n\n\n### Phase 3: Measuring and securing the solution\n\n\nDuring the entire development cycle, we frequently measured the performance\nof the various approaches\n\nwe tested, so as to understand how they would affect request latency and\nmemory use.\n\nFor latency tests we relied on [Apache\nBench](https://httpd.apache.org/docs/2.4/programs/ab.html), since\n\nrecalling our initial mission, we were mostly interested in reducing the\nrequest latency a user might experience.\n\n\nWe also ran benchmarks encoded as Golang tests that specifically [compared\ndifferent scaler\nimplementations](https://gitlab.com/ayufan/image-resizing-test)\n\nand how performance changed with different image formats and image sizes. We\nlearned a lot from these\n\ntests, especially where we would typically lose the most time, which often\nwas in encoding/decoding\n\nan image, and not in resizing an image per se.\n\n\nWe also took security very seriously from the start. Some image formats such\nas SVGs are notorious\n\nfor remote code execution attacks, but there were other concerns such as\nDOS-ing the service with\n\ntoo many scaler requests or PNG compression bombs. We therefore\n\nput very strict requirements in place around what sizes (both dimensionally\nbut also in bytes) and\n\nformats we will accept.\n\n\nUnfortunately one fairly severe issue remained that turned out to be a deal\nbreaker with our simple\n\nsolution: `gm` is a complex piece of software, and shelling out to a 3rd\nparty binary written in C still\n\nleaves the door open for a number of security issues. 
The decision was to\n[sandbox the binary](https://gitlab.com/groups/gitlab-org/-/epics/4373)\n\ninstead, but this turned out\n\nto be a lot more difficult than anticipated. We evaluated but discarded\nmultiple approaches to sandboxing\n\nsuch as via `setuid`, `chroot` and `nsjail`, as well as building a custom\nbinary on top of [seccomp](https://en.wikipedia.org/wiki/Seccomp).\n\nHowever, due to performance, complexity or other concerns we discarded all\nof them in the end.\n\nWe eventually decided to sacrifice some performance for the sake of\nprotecting our users as best we can and\n\nwrote a scaler binary in Golang, based on an existing\n[imaging](https://github.com/disintegration/imaging)\n\nlibrary, which had none of these issues.\n\n\n### Results, conclusion and outlook\n\n\nIn roughly two months we took an innocent sounding but in fact complex\ntopic, image scaling, and went\n\nfrom \"we know nothing about this\" to a fully functional solution that is now\nrunning on gitlab.com.\n\nWe faced many headwinds along the way, in part because we were unfamiliar\nwith both the topic and\n\nthe technology behind Workhorse (Golang), but also because we underestimated\nthe challenges of delivering\n\nan image scaler that will be both fast and secure, an often difficult\ntrade-off. A major lesson learned\n\nfor us is that security cannot be an afterthought; it has to be part of the\ndesign from day one and\n\nmust be part of informing the approach taken.\n\n\nSo was it a success? Yes! While the feature didn't have as much of an impact\non overall perceived client\n\nlatency as we had hoped, we still dramatically improved a number of metrics.\nFirst and foremost, the\n\ndreaded \"properly size image\" reminder that topped our sitespeed metrics\nreports is resolved. 
This is also evident\n\nin the average image size processed by clients, which for image heavy pages\nfell off a cliff (that's good -- lower is\n\nbetter here):\n\n\n![image size\nmetric](https://gitlab.com/groups/gitlab-org/-/uploads/b453aedaf2132db1292898508fd6a0c1/Bildschirmfoto_2020-10-06_um_07.02.56.png)\n\n\nSite-wide we saw a staggering **93% reduction** in image transfer size of\npage content delivered to clients.\n\nThese gains also translate into savings for GCS egress traffic, and hence\nDollar cost savings, by an equivalent amount.\n\n\nA feature is never done of course, and there are a number of things we are\nlooking to improve in the future:\n\n\n- Improving metrics and observability\n\n- Improving performance through more aggressive caching\n\n- Adding support for WebP and other features such as image blurring\n\n- Supporting content images embedded into GitLab issues and comments\n\n\nThe Memory team meanwhile will slowly step back from this work, however, and\nhand it over to product teams\n\nas product requirements evolve.",[9,789,1074],{"slug":3362,"featured":6,"template":699},"scaling-down-how-we-prototyped-an-image-scaler-at-gitlab","content:en-us:blog:scaling-down-how-we-prototyped-an-image-scaler-at-gitlab.yml","Scaling Down How We Prototyped An Image Scaler At Gitlab","en-us/blog/scaling-down-how-we-prototyped-an-image-scaler-at-gitlab.yml","en-us/blog/scaling-down-how-we-prototyped-an-image-scaler-at-gitlab",{"_path":3368,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3369,"content":3375,"config":3381,"_id":3383,"_type":13,"title":3384,"_source":15,"_file":3385,"_stem":3386,"_extension":18},"/en-us/blog/scaling-our-use-of-sidekiq",{"title":3370,"description":3371,"ogTitle":3370,"ogDescription":3371,"noIndex":6,"ogImage":3372,"ogUrl":3373,"ogSiteName":685,"ogType":686,"canonicalUrls":3373,"schema":3374},"How we scaled async workload processing at GitLab.com using Sidekiq","Sidekiq was a great tool for async processing until it couldn't 
keep up. Here's how we made it scale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667068/Blog/Hero%20Images/sidekiqmountain.jpg","https://about.gitlab.com/blog/scaling-our-use-of-sidekiq","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we scaled async workload processing at GitLab.com using Sidekiq\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rachel Nienaber\"}],\n        \"datePublished\": \"2020-06-24\",\n      }",{"title":3370,"description":3371,"authors":3376,"heroImage":3372,"date":3378,"body":3379,"category":832,"tags":3380},[3377],"Rachel Nienaber","2020-06-24","## Sidekiq at GitLab\n\nGitLab is a Ruby-on-Rails application that processes a lot of data. Much of this processing can be done asynchronously,\nand one of the solutions we use to accomplish this is [Sidekiq](https://github.com/mperham/sidekiq/wiki) which is a background-processing\nframework for Ruby. It handles jobs that are better processed asynchronously outside the web request/response cycle.\n\nThere are a few terms that we'll use in this post:\n\n* A **worker class** is a class defined in our application to process a task in Sidekiq.\n* A **job** is an instance of a worker class, so each job represents a single task.\n* A **queue** is a collection of jobs (potentially for different worker classes) that are waiting to be processed.\n* A **worker thread** is a thread processing jobs in particular queues. Each Sidekiq process can have multiple worker threads.\n\nThen there are two terms specific to GitLab.com:\n\n* A **Sidekiq role** is a configuration for a particular group of queues. For instance, we might have a `push_actions` role that is for processing the `post_receive` and `process_commit` queues.\n* A **Sidekiq node** is an instance of the GitLab application for a Sidekiq role. 
A Sidekiq node can have multiple Sidekiq processes.\n\nBack in 2013, in version 6.3 of GitLab, every Sidekiq worker class had its own queue. We weren't strict in monitoring the creation of\nnew worker classes. There was no strategic plan for assigning queues to where they would execute.\n\nIn 2016, we tried to introduce order again, and rearranged the queues to be based on features. We followed this with a change in\n2017 to have a dedicated queue for each worker class again, and we were able to monitor queues more accurately and impose specific\nthrottles and limits to each. It was easy to quickly make decisions about the queues as they were running because of how\nthe work was distributed. The queues were grouped, and the names of these groups were `realtime`, `asap`, and `besteffort` for example.\n\nAt the time, we knew that this was not the approach recommended by the author of Sidekiq, Mike Perham, but we felt that we knew what\nthe trade-offs were. In fact, Mike wrote: \n\n> “I don't recommend having more than a handful of queues. Lots of queues makes for a more complex\n> system [and Sidekiq Pro cannot reliably](https://github.com/antirez/redis/issues/1785) handle multiple queues without\n> polling. 
M Sidekiq Pro processes polling N queues means O(M*N) operations per second slamming Redis.”\n\nFrom [https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues](https://github.com/mperham/sidekiq/wiki/Advanced-Options#queues)\n\nThis served us well for nearly two years before this approach no longer matched our scaling needs.\n\n### Pressure from availability issues\n\nIn mid-2019 GitLab.com experienced several different major incidents related to the way we\nprocess background queues.\n\nExamples of these incidents:\n- [Gitaly n+1 calls caused bad latency and resulted in the Sidekiq queues growing](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7479).\nThis was due to the way we processed tags in Gitaly.\n- A user generated many notes on a single commit which [slowed down the new_note Sidekiq queue](https://gitlab.com/gitlab-com/gl-infra/production/issues/1028)\nand led to a delay of sending out notifications.\n- CI jobs took very long to complete because [jobs in the pipeline_processing:pipeline_process Sidekiq queue piled up](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/7402).\n2 pipelines caused a high amount of Sidekiq jobs, Sidekiq pipeline nodes were maxing out their CPU, pipeline_processing\njobs were causing many SQL calls and the pgbouncer pool for Sidekiq was becoming saturated.\n\nAll of these were showing that we needed to take action.\n\n![Sidekiq throughput per job](https://about.gitlab.com/images/blogimages/sidekiq_throughput_per_job.png){: .shadow}\n\nThis image shows how many jobs we process per second over a 24 hour period. 
This shows the variety of jobs and\ngives an idea of the scale of jobs in relation to each other.\n\n### Improvements\n\n#### Changing the relationship between jobs and Sidekiq roles\n\nIn [infrastructure#7219 (closed)](https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/7219) we significantly\naltered our approach for how jobs were related to Sidekiq roles.\n\nWe started from a position where:\n1. We had a many-to-many relationship between Sidekiq jobs and Sidekiq roles.\n   1. For example, most pipeline jobs ran on the `besteffort` nodes, but some ran on the pipeline nodes.\n   1. Some jobs ran on up to three types of node: eg `realtime`, `asap` and `besteffort` priorities.\n1. Worker threads were reserved for single queues.\n   1. For example, one eighth of the `realtime` queue might be reserved for new_note jobs. In the event of a glut of\n  new_note jobs, most of the fleet would sit idle while one worker thread would be saturated. Worse, adding more nodes would\n  only increase processing power by 1/8th of a node, not the full compute capacity of the new node.\n1. Urgent and non-urgent jobs would be in the same queue.\n   1. For example, some jobs in the `realtime` queue would take up to 10 minutes to process.\n   1. This is a bit like allowing overloaded trolleys in the 10 items-or-less lane.\n\nOnce the issue was completed, we now had:\n1. A one-to-one relationship between Sidekiq jobs and Sidekiq roles\n   1. Each job will execute on exactly one Sidekiq role\n1. All worker threads will run all jobs, and each Sidekiq node will have the same number of worker threads\n   1. When a glut of jobs comes in, 100% of compute on a node can be dedicated to executing the jobs\n1. Slow jobs and fast jobs are kept apart\n   1. The 10 items or less lane is now being enforced.\n\nWhile this was a significant improvement, it introduced some technical debt. 
We fixed everything for a moment in time,\nknowing that as soon as the application changed this would be out of date, and as time went on, would only get more out\nof date until we were back in the same position. To try and mitigate this in future, we started to look at classifying\nthe workloads and using queue selectors.\n\n#### Queue Selectors Deployed in Sidekiq Cluster\n\nIn the\n[Background Processing Improvements Epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/96), we looked at ways\nthat we could simplify the structure so that background processing could be in a position to scale to 100x the traffic\nat the time. We also needed the processing to be unsurprising. Operators (and developers) should understand where a job\nwill run, why it is queueing up and how to reduce queues. We decided to move to using [queue selectors](https://docs.gitlab.com/ee/administration/sidekiq/extra_sidekiq_processes.html)\nto help us to keep the queue definitions correct. (This approach is still experimental).\n\nIn addition, the infrastructure team should not reactively (and manually) route Sidekiq jobs to priority fleets, as\nwas the situation previously. Developers should have the ability to specify the requirements of their workloads and\nhave these automatically processed on a queue designed to support that type of work.\n\nSidekiq processes can be configured to select specific queues for processing. Instead of making this selection by name,\nwe wanted to make the selection on how the workload for that queue was classified.\n\nWe came up with an approach for classifying background jobs by their workload and building a sustainable way of grouping\nsimilar workloads together.\n\nWhen a new job is created, developers need to do this to classify the workload. This is done through\n- Specifying the [urgency of the job](https://docs.gitlab.com/ee/development/sidekiq/index.html). The options\nare `high`, `low` and `none`. 
If the delay of a job would have user impact, then the job is `high` urgency.\n- Noting if the [job has external dependencies](https://docs.gitlab.com/ee/development/sidekiq/index.html)\nthat could impact their availability. (For example, if they communicate with user-specified Kubernetes clusters).\n- Adding an [annotation declaring if the worker class will be cpu-bound or memory-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html). Knowing\nthis allows us to make decisions around how much thread concurrency a Ruby process can tolerate, or targeting memory-bound\njobs to low-concurrency, high-memory nodes.\n\nThere is additional guidance available to [determine if the worker class should be marked as cpu-bound](https://docs.gitlab.com/ee/development/sidekiq/index.html).\n\n#### SLAs are based on these attributes\n\n1. High urgency jobs should not queue for more than 10 seconds.\n1. High urgency jobs should not take more than 10 seconds to execute (this SLA is the responsibility of the owning team to ensure that high throughput is maintained).\n1. Low urgency jobs should not queue for more than 1 minute.\n1. Jobs without urgency have no queue SLA.\n1. 
Non-high urgency jobs should not take more than 5 minutes to execute.\n\nIn each case, the queuing SLAs are the responsibility of the infrastructure team, as they need to ensure that the fleet is\ncorrectly provisioned to meet the SLA.\n\nThe execution latency SLAs are the responsibility of the development team owning the worker class, as they need to ensure that the\nworker class is sufficiently performant to ensure throughput.\n\n![Sidekiq certain queues spike](https://about.gitlab.com/images/blogimages/sidekiq_authorized_projects.png){: .shadow}\n\nThis image shows the challenges we faced by having jobs of different urgency running on the same queue.\nThe purple lines show spikes from one particular worker, where many jobs were added to the queue,\ncausing delays to other jobs which were often of equal or higher importance.\n\n### Challenge during rollout - BRPOP\n\nAs the number of background queues in the GitLab application grows, this approach continues to burden our Sidekiq Redis\nservers. On GitLab.com, our `catchall` Sidekiq nodes monitor about 200 queues, and the Redis [BRPOP](https://redis.io/commands/brpop)\ncommands used to monitor the queues consume a significant amount of time (by Redis latency standards).\n\nThe number of clients listening made this problem worse. For `besteffort` we had 7 nodes, each running 8 processes,\nwith 15 threads watching those queues - meaning 840 clients.\n\nThe command causing the problem was BRPOP. The time taken to perform this command also relates\nto the number of listeners on those keys. The addition of multiple keys increases contention in the system which causes\nlots of connections to block. And when the key list is longer the problem gets worse. The keylist represents the number of\nqueues, the more queues we have, the more keys we are listening to. 
We saw this problem on the nodes that process the most queues.\n\nWe raised an issue in the Redis issue tracker about the [performance we observed when many clients performed BRPOP on the\nsame key](https://github.com/antirez/redis/issues/7071). It was fantastic when [Salvatore](https://github.com/antirez)\nresponded within the hour and the patch was available the same day!  This fix was made in Redis 6 and backported to Redis 5.\n[Omnibus has also been upgraded to use this fix](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4126), and it will\nbe available in the major release 13.0.\n\n### Current State (as of June 2020)\n\nMigrating to these new selectors has been completed as of late April 2020.\n\nWe reduced our Sidekiq fleet from 49 nodes with 314 CPUs, to 26 nodes with 158 CPUs. This has also reduced our cost.\nThe average utilization is more evenly spread across the new fleets.\n\nAlso, we have [moved Sidekiq-cluster to Core](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181). Previously, running\nSidekiq in clustered mode (i.e. spawning more than one process) was\ntechnically only available as part of GitLab EE distributions, and for self-managed environments only in the Starter+ tiers.\nBecause of that, when booting Sidekiq up in a development env with the GDK, the least common denominator was assumed,\nwhich was to run Sidekiq in a single-process setup. That can be a problem, because it means there is a divergence between\nthe environment developers work on, and what will actually run in production (i.e. gitlab.com and higher-tier self-managed environments).\n\nIn [release 13.0](/releases/2020/06/22/gitlab-13-1-released/) Sidekiq Cluster is used by default.\n\nWe’re also better placed to migrate to Kubernetes.  
The selector approach is a lot more compatible with making good\ndecisions about things like CPU allocations + limits for Kubernetes workloads, and this will make the job of our delivery\nteam easier, leading to further cost reductions from auto-scaling deployed resources to match actual load.\n\nOur next piece of work with Sidekiq will be to [reduce the number of queues that we need to watch](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/194)\nand we will post a follow-up to this blog post when the work is completed.\n\n**Read more about infrastructure issues:**\n\n[Faster pipelines with DAG](/blog/directed-acyclic-graph/)\n\n[Keep Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n[Understand parent-child pipelines](/blog/parent-child-pipelines/)\n\nCover image by [Jerry Zhang](https://unsplash.com/@z734923105) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[790,1074,9],{"slug":3382,"featured":6,"template":699},"scaling-our-use-of-sidekiq","content:en-us:blog:scaling-our-use-of-sidekiq.yml","Scaling Our Use Of Sidekiq","en-us/blog/scaling-our-use-of-sidekiq.yml","en-us/blog/scaling-our-use-of-sidekiq",{"_path":3388,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3389,"content":3395,"config":3401,"_id":3403,"_type":13,"title":3404,"_source":15,"_file":3405,"_stem":3406,"_extension":18},"/en-us/blog/secure-stage-for-appsec",{"title":3390,"description":3391,"ogTitle":3390,"ogDescription":3391,"noIndex":6,"ogImage":3392,"ogUrl":3393,"ogSiteName":685,"ogType":686,"canonicalUrls":3393,"schema":3394},"How GitLab's application security dashboard helps AppSec engineers","GitLab Security features help application security engineers collaborate more efficiently and better assess the security posture of the projects they oversee.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663482/Blog/Hero%20Images/ralph-kayden-4Cg5T03B_8s-unsplash.jpg","https://about.gitlab.com/blog/secure-stage-for-appsec","\n                 
       {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's application security dashboard helps AppSec engineers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2020-07-07\",\n      }",{"title":3390,"description":3391,"authors":3396,"heroImage":3392,"date":3398,"body":3399,"category":832,"tags":3400},[3397],"Fernando Diaz","2020-07-07","\n[Application Security (AppSec)](/topics/devsecops/) engineers focus on enhancing an application's security, by\nfinding, resolving, and preventing vulnerabilities. But managing all these\nvulnerabilities across different teams and projects is not an easy process. Managing vulnerabilities\ncan be simplified by using the [GitLab Secure](/stages-devops-lifecycle/secure/)\nfeatures found in [GitLab Ultimate](/pricing/ultimate/).\n\nOne of the significant capabilities of GitLab Secure is the accurate, automated, and continuous assessment of the\nsecurity of your applications and services through a unified dashboard.\n\nIn this blog post, I will show four ways GitLab Secure makes life easier for the AppSec\nengineer.\n\n---\n\n## Finding vulnerabilities with security scans\n\nThe first capability that AppSec engineers will find useful is the robust security scanning capabilities in [GitLab Ultimate](/pricing/ultimate/).\n\nThese capabilities allow you to proactively identify vulnerabilities and weaknesses to minimize your security risk\nusing a variety of defense-in-depth techniques. 
The security scans include the following:\n\n* [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)\n* [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/)\n* [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\n* [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/)\n* [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_compliance/index.html)\n\n![pipeline with security scans](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/pipeline.png)\nGitLab pipeline running security scans\n{: .note.text-center}\n\nSimply add a [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Security)\nto your [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) or by enable [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to set-up the scans.\n\nWhen submitting a merge request (MR), the security scans will run and populate the MR with\ndata on the vulnerabilities detected and how to resolve them. 
This data allows AppSec engineers\nto begin risk analysis and remediation.\n\n## Managing vulnerabilities with the Security Dashboard\n\nThe second most useful capability for AppSec engineers is the [Security Dashboard](https://docs.gitlab.com/ee/user/application_security/security_dashboard/), which helps\nkeep projects organized and summarizes the relevant security details for an application, all in one place.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/t-3TSlChHy4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Security Dashboard in [GitLab Ultimate](/pricing/ultimate/) provides a high-level overview of the status of all the vulnerabilities\ndetected in groups, projects, and pipelines.\n\n![security dashboard with group view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-group.png)\nSecurity Dashboard Group-Level view\n{: .note.text-center}\n\nBy using the Security Dashboard, an AppSec engineer can drill down into each\nvulnerability to obtain additional information, such as how to resolve the vulnerability,\nhow it was handled by the developer, and if a work ticket (or GitLab issue) has been opened\nfor remediation.\n\nThe Security Dashboard also shows which file the vulnerability was detected in. Each vulnerability\nis assigned a severity and a report type. 
By using this information an AppSec Engineer\ncan quickly identify which items is the most critical for the team to tackle first.\n\n![security dashboard with project view](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-project.png)\nSecurity Dashboard project-level view\n{: .note.text-center}\n\nIt's important to note the Security Dashboard supports integrations with third-party scanners.\nFor example, if you are using [WhiteSource](https://www.whitesourcesoftware.com/gitlab/), the\nscans results can added to and managed in the Security Dashboard.\n\n## Auditing with the Security Dashboard\n\nA third capability GitLab Secure offers AppSec engineers is auditing. The engineer can use this capability to audit\na project or group based on the vulnerabilities revealed in various tests. By using the Security Dashboard,\nthe AppSec engineer can see which vulnerabilities have been dismissed, the developer who dismissed them, as\nwell as the reason why they were dismissed. This is helpful for several reasons:\n\n* Check to make sure the development team is practicing secure coding\n* Confirm there are no malicious actors dismissing issues\n* Keep track of the status of vulnerabilities which could not be immediately resolved\n\n![security dashboard vulnerability info](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-info.png)\nVulnerability info displayed in Security Dashboard\n{: .note.text-center}\n\nAn AppSec engineer can also track and create [confidential issues](https://docs.gitlab.com/ee/user/project/issues/confidential_issues.html) from the\nSecurity Dashboard. A team can keep track of the status\nof a vulnerability in private, and make sure it is still on track to being resolved when using confidential issues. 
A\nconfidential branch can be created along with the issue, so that the development team\ncan work on a resolution without tipping off malicious actors.\n\n![security dashboard confidential issue creation](https://about.gitlab.com/images/blogimages/secure-stage-for-infosec-professionals/dashboard-issue-creation.png)\nConfidential issues created via Security Dashboard\n{: .note.text-center}\n\n## Managing software licenses\n\nThe final capability we recommend AppSec engineers use is our license management.\n\nTypically, developers will use a variety of open source dependencies instead of reinventing the wheel.\nThere is a problem though: using a dependency with a restrictive license can invalidate your application.\n\nAn AppSec engineer is able to add a policy to mark licenses as acceptable or unacceptable for a project and its dependencies.\nIf an unacceptable license is found, the MR can be blocked. The video below provides\nan overview:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/42f9LiP5J_4\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab Secure capabilities enhance the effiency of AppSec engineers, ultimately\nleading to the production of more secure applications and a more security-empowered\ndevelopment team. 
Learn more at [DevSecOps](/solutions/security-compliance/) and\ncheckout the [GitLab Secure direction page](/direction/secure/) for more\ninformation on the upcoming features and integrations.\n\n### Level up your DevSecOps knowledge:\n\n  [GitLab's security tools and the HIPAA risk analysis](https://about.gitlab.com/blog/gitlab-security-tools-and-the-hipaa-risk-analysis/)\n  [A deep dive into the Security Analyst persona](https://about.gitlab.com/blog/a-deep-dive-into-the-security-analyst-persona/)\n  [Compliance made easy with GitLab](https://about.gitlab.com/blog/compliance-made-easy/)\n\nCover image by [Ralph Kayden](https://unsplash.com/@ralphkayden) on [Unsplash](https://unsplash.com/photos/4Cg5T03B_8s)\n{: .note}\n\n## Learn more about DevSecOps\n\n- [Efficient DevSecOps: 9 tips for shifting left](/blog/efficient-devsecops-nine-tips-shift-left/)\n- [Want better DevSecOps? Try cross-functional collaboration](/blog/achieve-devsecops-collaboration/)\n- [Compliance made easy with GitLab](/blog/compliance-made-easy/)\n\n\u003C%= partial \"includes/blog/blog-merch-banner\" %>\n",[787,722,9,744],{"slug":3402,"featured":6,"template":699},"secure-stage-for-appsec","content:en-us:blog:secure-stage-for-appsec.yml","Secure Stage For Appsec","en-us/blog/secure-stage-for-appsec.yml","en-us/blog/secure-stage-for-appsec",{"_path":3408,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3409,"content":3415,"config":3422,"_id":3424,"_type":13,"title":3425,"_source":15,"_file":3426,"_stem":3427,"_extension":18},"/en-us/blog/sentry-integration-blog-post",{"title":3410,"description":3411,"ogTitle":3410,"ogDescription":3411,"noIndex":6,"ogImage":3412,"ogUrl":3413,"ogSiteName":685,"ogType":686,"canonicalUrls":3413,"schema":3414},"Sentry's GitLab integration streamlines error remediation","Your code has bugs, my code has bugs, everyone’s code has bugs (probably). 
Let’s fix that.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679964/Blog/Hero%20Images/sentry-io-blog.jpg","https://about.gitlab.com/blog/sentry-integration-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline and shorten error remediation with Sentry’s new GitLab integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eva Sasson\"}],\n        \"datePublished\": \"2019-01-25\",\n      }",{"title":3416,"description":3411,"authors":3417,"heroImage":3412,"date":3419,"body":3420,"category":1584,"tags":3421},"Streamline and shorten error remediation with Sentry’s new GitLab integration",[3418],"Eva Sasson","2019-01-25","\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KUHk1uuXWhA?rel=0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nSentry is open source error tracking that gives visibility across your entire stack and provides the details you need to fix bugs, ASAP. 
Because the only thing better than visibility and details is more visibility and details, Sentry improved their [GitLab integration](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) by adding [release](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) and [commit](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#link-repository) tracking as well as [suspect commits](https://docs.sentry.io/workflow/releases/?platform=browser&utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM#after-linking-a-repository).\n\n### Streamline your workflow with issue management and creation\n\nWhen you receive an alert about an error, the last thing you want to do is to jump around 20 different tools trying to find out exactly what happened and where. Developers with both Sentry and GitLab in their application lifecycle benefit from issue management and issue creation to their GitLab accounts directly in the Sentry UI, alleviating some of the hassle of back-and-forth tool toggling.\n\n![GitLab account in Sentry](https://about.gitlab.com/images/blogimages/sentry/gitlab-sentry-integration.png){: .shadow.large.center}\n\nOf course, less tool jumping results in a more streamlined triaging process and shortened time to issue resolution – something that benefits the whole team.\n\n![Creating GitLab issue](https://about.gitlab.com/images/blogimages/sentry/create-gitlab-issue.png){: .shadow.medium.center}\n\nHave a GitLab issue that wasn’t created in Sentry? No problem. 
Existing issues are also easily linked.\n\n![Import GitLab issue](https://about.gitlab.com/images/blogimages/sentry/import-gitlab-issue.png){: .shadow.medium.center}\n\n### Find and fix bugs faster with release and commit tracking\n\nWhy stop at streamlining the triaging process, when we can also make issue resolution more efficient? Sentry’s GitLab integration now utilizes GitLab commits to find and fix bugs faster.\n\nWith the newly added release and commit tracking, an enhanced release overview page uncovers new and resolved issues, files changed, and authors. Developers can also resolve issues via commit messages or merge requests, see suggested assignees for issues, and receive detailed deploy emails.\n\nWant a big flashing arrow that points to an error’s root cause? Sentry’s suspect commits feature exposes the commit that likely introduced an error as well as the developer who wrote the broken code.\n\n![Suspect commits feature](https://about.gitlab.com/images/blogimages/sentry/suspect-commits-feature.png){: .shadow.medium.center}\n\nKeep in mind that this feature is available for Sentry users on “Teams” plans and above.\n{: .note}\n\nCheck out [Sentry’s GitLab integration documentation](https://docs.sentry.io/workflow/integrations/global-integrations/gitlab/?utm_source=GitLab&utm_medium=blog&utm_campaign=GitLab_GTM) to get started.\n\n### What’s next?\n\nAgain, why stop there, when we can do even more? GitLab is currently working to bring Sentry into the GitLab interface. Soon, GitLab and Sentry users will see their Sentry errors listed in their GitLab projects. 
Read the documentation on [the integration here](https://docs.gitlab.com/ee/operations/error_tracking.html).\n\n### About the guest author\n\nEva Sasson is a Product Marketer at [Sentry.io](https://sentry.io/welcome/), an open source error-tracking tool that gives developers the contextual information they need to resolve issues quickly, and integrates with the other development tools across the stack.\n",[108,696,722,232,721,787,789,1364,9],{"slug":3423,"featured":6,"template":699},"sentry-integration-blog-post","content:en-us:blog:sentry-integration-blog-post.yml","Sentry Integration Blog Post","en-us/blog/sentry-integration-blog-post.yml","en-us/blog/sentry-integration-blog-post",{"_path":3429,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3430,"content":3436,"config":3442,"_id":3444,"_type":13,"title":3445,"_source":15,"_file":3446,"_stem":3447,"_extension":18},"/en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"title":3431,"description":3432,"ogTitle":3431,"ogDescription":3432,"noIndex":6,"ogImage":3433,"ogUrl":3434,"ogSiteName":685,"ogType":686,"canonicalUrls":3434,"schema":3435},"Cloud infrastructure for on-demand development in GitLab","Learn how to set up the requirements, manage Kubernetes clusters in different clouds, create the first workspaces and custom images, and get tips and troubleshooting.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up your infrastructure for on-demand, cloud-based development environments in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-07-13\",\n      
}",{"title":3437,"description":3432,"authors":3438,"heroImage":3433,"date":3439,"body":3440,"category":832,"tags":3441},"Set up your infrastructure for on-demand, cloud-based development environments in GitLab",[2491],"2023-07-13","Cloud-based development environments enable a better developer onboarding experience and help make teams more efficient. In this tutorial, you'll learn how to ready your infrastructure for on-demand, cloud-based development environments. You'll also learn how to set up the requirements, manage Kubernetes clusters in different clouds, create your first workspaces and custom images, and get tips for troubleshooting.\n\nThe GitLab agent for Kubernetes, an OAuth GitLab app, and a proxy pod deployment make the setup reproducible in different Kubernetes cluster environments and follow cloud-native best practices. Bringing your infrastructure allows platform teams to store the workspace data securely, control resource usage, harden security, and troubleshoot the deployments in known ways.\n\nThis blog post is a long read so feel free to navigate to the sections of interest. 
However, if you want to follow the tutorial step by step, the sections depend on one another for the parts pertaining to infrastructure setup.\n\n- [Development environments on your infrastructure](#development-environments-on-your-infrastructure)\n- [Requirements](#requirements)\n    - [Workspaces domain](#workspaces-domain)\n    - [TLS certificates](#tls-certificates)\n- [GitLab OAuth application ](#gitlab-oauth-application)\n- [Kubernetes cluster setup](#kubernetes-cluster-setup)\n    - [Set up infrastructure with Google Kubernetes Engine (GKE)](#set-up-infrastructure-with-google-kubernetes-engine=gke)\n    - [Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)](#set-up-infrastructure-with-amazon-elastic-kubernetes-service-eks)\n    - [Set up infrastructure with Azure Managed Kubernetes Service (AKS)](#set-up-infrastructure-with-azure-managed-kubernetes-service-aks)\n    - [Set up infrastructure with Civo Cloud Kubernetes](#set-up-infrastructure-with-civo-cloud-kubernetes)\n    - [Set up infrastructure with self-managed Kubernetes](#set-up-infrastructure-with-self-managed-kubernetes)\n- [Workspaces proxy installation into Kubernetes](#workspaces-proxy-installation-into-kubernetes)\n- [Agent for Kubernetes installation](#agent-for-kubernetes-installation)\n- [Workspaces creation](#workspaces-creation)\n    - [Create the first workspaces](#create-the-first-workspaces)\n    - [Custom workspace container images](#custom-workspace-container-images)\n- [Tips](#tips)\n    - [Certificate management](#certificate-management)\n    - [Troubleshooting](#troubleshooting)\n    - [Contribute](#contribute)\n- [Share your feedback](#share-your-feedback)\n\n## Development environments on your infrastructure\nSecure, on-demand, cloud-based development workspaces are [available in beta for public projects](/blog/introducing-workspaces-beta/) for Premium and Ultimate customers. The first iteration allows you to bring your own infrastructure as a Kubernetes cluster. 
GitLab already deeply integrates with Kubernetes through the GitLab agent for Kubernetes, setting the foundation for configuration and cluster management.\n\nUsers can define and use a development environment template in a project. Workspaces in GitLab support the [devfile specification](https://docs.gitlab.com/ee/user/workspace/#devfile) as `.devfile.yaml` in the project repository root. The devfile attributes allow configuring of the workspace. For example, the `image` attribute specifies the container image to run and create the workspace in isolated container environments. The containers require a cluster orchestrator, such as Kubernetes, that manages resource usage and ensures data security and safety. Workspaces also need authorization: Project source code may contain sensitive intellectual property or otherwise confidential data in specific environments. The setup requires a GitLab OAuth application as the foundation here.\n\nThe following steps provide an in-depth setup guide for different cloud providers. If you prefer to set up your own environment, please follow the [documentation for workspace prerequisites](https://docs.gitlab.com/ee/user/workspace/#prerequisites). In general, we will practice the following steps:\n0. (Optional) Register a workspaces domain, and create TLS certificates.\n1. Create a Kubernetes cluster and configure access and requirements.\n2. Install an Ingress controller.\n3. Set up the workspaces proxy with the domain, TLS certificates, and OAuth app.\n4. Create a new GitLab group with a GitLab agent project. The agent can be used for all projects in that group.\n5. Install the GitLab agent for Kubernetes using the UI provided Helm chart command.\n6. Create an example project with a devfile configuration for workspaces.\n\nSome commands do not use the terminal indicator (`$` or `#`) to support easier copy-paste of command blocks into terminals.\n\n## Requirements\nThe steps in this blog post require the following CLI tools:\n1. 
`kubectl` and `helm` for Kubernetes\n2. `certbot` for Let's Encrypt\n3. git, curl, dig, openssl, and sslscan for troubleshooting\n\n### Workspaces domain\nWorkspaces require a domain with DNS entries. Cloud providers, for example, Google Cloud, also provide domain services which integrate more easily. You can also register and manage domains with your preferred provider.\n\nThe required DNS entries will be:\n- Wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) A/AAAA records pointing to the external Kubernetes external IP: `kubectl get services -A`\n- (Optional, with Let's Encrypt) ACME DNS challenge entries as TXT records\n\nAfter acquiring a domain, wait until the Kubernetes setup is ready and extract the A/AAAA records for the DNS settings. The following example shows how `remote-dev.dev` is configured in the Google Cloud DNS service.\n\n![GitLab remote development workspaces, example DNS configuration for remote-dev.dev](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_google_cloud_dns_remote-dev.dev-entries.png){: .shadow}\n\nExport shell variables that define the workspaces domains, and the email contact. These variables will be used in all setup steps below.\n\n```\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n```\n\n**Note:** This blog post will show the example domain `remote-dev.dev` for better understanding with a working example. The domain `remote-dev.dev` is maintained by the [Developer Evangelism team at GitLab](/handbook/marketing/developer-relations/developer-evangelism/projects/). There are no public demo environments available at the time of writing this blog post.\n\n### TLS certificates\nTLS certificates can be managed with different methods. 
To get started quickly, it is recommended to follow the [documentation steps](https://docs.gitlab.com/ee/user/workspace/#prerequisites) with Let's Encrypt and later consider production requirements with TLS certificates.\n\n```shell\ncertbot -d \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n\n  certbot -d \"${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n```\n\nThe Let's Encrypt CLI prompts you for the ACME DNS challenge. This requires setting TXT records for the challenge session immediately. Add the DNS records and specify a low TTL (time-to-live) of 300 seconds to update the records during the first steps.\n\n```\n_acme-challenge TXT \u003Cstringfromletsencryptacmechallenge>\n```\n\nYou can verify the DNS records using the `dig` CLI command.\n\n```shell\n$ dig _acme-challenge.remote-dev.dev txt\n...\n;; ANSWER SECTION:\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"TlGRM9JGdXHGVklPWgytflxWDF82Sv04nF--Wl9JFvg\"\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"CqG_54w6I0heWF3wLMAmUAitPcUMs9qAU9b8QhBWFj8\"\n```\n\nOnce the Let's Encrypt routine is complete, note the TLS certificate location.\n\n```\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/fullchain.pem\nKey is saved at:         
/Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n```\n\nExport the TLS certificate paths into environment variables for the following setup steps.\n\n```shell\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n```\n\n**Note**: If you prefer to use your certificates, please copy the files into a safe location, and export the environment variables with the path details.\n\n## GitLab OAuth application\n_After preparing the requirements, continue with the components setup._\n\nCreate a [group-owned OAuth application](https://docs.gitlab.com/ee/integration/oauth_provider.html) for the remote development workspaces group. Creating a centrally managed app with a service account or group with limited access is recommended for production use.\n\nNavigate into the group `Settings > Applications` and specify the following values:\n\n1. Name: `Remote Development workspaces by \u003Cresponsible team> - \u003Cdomain>`. Add the reponsible team that is trusted in your organization. For debugging, add the domain. There might be multiple authorization groups, this helps the identification which workspace domain is used.\n2. Redirect URI: `https://\u003CGITLAB_WORKSPACES_PROXY_DOMAIN>/auth/callback`. Replace `GITLAB_WORKSPACES_PROXY_DOMAIN` with the domain string value.\n3. 
Set the scopes to `api, read_user, openid, profile` .\n\n![GitLab remote development workspaces, OAuth application in the group settings](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app_create.png){: .shadow}\n\nStore the OAuth application details in your password vault, and export them as shell environment variables for the next setup steps.\n\nCreate a configuration secret for the proxy as a signing key (`SIGNING_KEY`), and store it in a safe place (for example, use a secrets vault like 1Password to create and store the key).\n\n```\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"a_random_key_consisting_of_letters_numbers_and_special_chars\" # Look into password vault and set\n```\n\n## Kubernetes cluster setup\nThe following sections describe how to set up a Kubernetes cluster in different cloud and on-premises environments and install an [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) for HTTP access. After completing the Kubernetes setup, you can continue with the workspaces proxy and agent setup steps.\n\n**Choose one method to create a Kubernetes cluster. 
Note: Use `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594).** Cloud environments with Arm support will not work yet, for example AWS EKS on Graviton EC2 instances.\n\nYou should have defined the following variables from the previous setup steps:\n\n```sh\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"XXXXXXXX\" # Look into password vault and set\n\n```\n\n### Set up infrastructure with Google Kubernetes Engine (GKE)\n\n[Install and configure the Google Cloud SDK and `gcloud` CLI](https://cloud.google.com/sdk/docs/install?hl=en), and install the `gke-gcloud-auth-plugin` plugin to authenticate against Google Cloud.\n\n```shell\nbrew install --cask google-cloud-sdk\n\ngcloud components install gke-gcloud-auth-plugin\n\ngcloud auth login\n```\n\nCreate a new GKE cluster using the `gcloud` command, or follow the steps in the Google Cloud Console.\n\n```shell\n\nexport GCLOUD_PROJECT=group-community\nexport 
GCLOUD_CLUSTER=de-remote-development-1\n\ngcloud config set project $GCLOUD_PROJECT\n\n# Create cluster (modify for your needs)\ngcloud container clusters create $GCLOUD_CLUSTER \\\n    --release-channel stable \\\n    --zone us-central1-c \\\n    --project $GCLOUD_PROJECT\n\n# Verify cluster\ngcloud container clusters list\n\nNAME                     LOCATION         MASTER_VERSION   MASTER_IP       MACHINE_TYPE  NODE_VERSION       NUM_NODES  STATUS\nde-remote-development-1  us-central1-c    1.26.3-gke.1000  34.136.33.199   e2-medium     1.26.3-gke.1000    3          RUNNING\n\ngcloud container clusters get-credentials $GCLOUD_CLUSTER --zone us-central1-c --project $GCLOUD_PROJECT\nFetching cluster endpoint and auth data.\nkubeconfig entry generated for de-remote-development-1.\n```\n\n1. The setup requires the [`Kubernetes Engine Admin` role in Google IAM](https://cloud.google.com/kubernetes-engine/docs/concepts/access-control?hl=en#recommendations) to create ClusterRoleBindings.\n2. Create a new Kubernetes cluster (do not use Autopilot).\n3. Ensure that [cluster autoscaling](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler?hl=en) is enabled in the GKE cluster.\n4. Verify that a [default Storage Class](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes?hl=en#storageclasses) has been defined.\n5. Install an Ingress controller, for example [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke). 
Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl create clusterrolebinding cluster-admin-binding \\\n  --clusterrole cluster-admin \\\n  --user $(gcloud config get-value account)\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ngcloud container clusters list\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)\nCreating an Amazon EKS cluster requires [cluster IAM roles](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). You can the [`eksctl` CLI for Amazon EKS](https://eksctl.io/), which automatically creates the roles. `eksctl` [requires the AWS IAM Authenticator for Kubernetes](https://github.com/weaveworks/eksctl/blob/main/README.md#prerequisite), which will get pulled with Homebrew automatically on macOS.\n\n```shell\nbrew install eksctl awscli aws-iam-authenticator\naws configure\n\neksctl create cluster --name remote-dev \\\n    --region us-west-2 \\\n    --node-type m5.xlarge \\\n    --nodes 3 \\\n    --nodes-min=1 \\\n    --nodes-max=4 \\\n    --version=1.26 \\\n    --asg-access\n```\n\nThe eksctl command uses the [`--asg-access`, `--nodes-min/max` parameters for auto-scaling](https://eksctl.io/usage/autoscaling/). The autoscaler requires [additional configuration steps](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md), alternatively [Karpenter is supported in Amazon EKS](https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/). 
Review the [autoscaling documentation](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html), and [default Storage Class `gp2`](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) fulfilling the requirements. The Kubernetes configuration is automatically updated locally.\n\nInstall the [Nginx Ingress controller for EKS](https://kubernetes.github.io/ingress-nginx/deploy/#aws). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\neksctl get cluster --region us-west-2 --name remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Azure Managed Kubernetes Service (AKS)\nInstall [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli).\n\n```shell\nbrew install azure-cli\n\naz login\n```\n\nReview the documentation for the [cluster autoscaler in AKS](https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler) and the [default Storage Class being `managed-csi`](https://learn.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes), create a new resource group, and create a new Kubernetes cluster. 
Download the Kubernetes configuration to continue with the `kubectl` commands.\n\n```shell\naz group create --name remote-dev-rg --location eastus\n\naz aks create \\\n--resource-group remote-dev-rg \\\n--name remote-dev \\\n--node-count 1 \\\n--vm-set-type VirtualMachineScaleSets \\\n--load-balancer-sku standard \\\n--enable-cluster-autoscaler \\\n--min-count 1 \\\n--max-count 3\n\naz aks get-credentials --resource-group remote-dev-rg --name remote-dev\n```\n\nInstall the [Nginx ingress controller in AKS](https://learn.microsoft.com/en-us/azure/aks/ingress-basic?tabs=azure-cli#basic-configuration). Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nNAMESPACE=ingress-basic\n\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo update\n\nhelm install ingress-nginx ingress-nginx/ingress-nginx \\\n  --create-namespace \\\n  --namespace $NAMESPACE \\\n  --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\nkubectl get services --namespace ingress-basic -o wide -w ingress-nginx-controller\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Civo Cloud Kubernetes\nInstall and configure the [Civo CLI](https://www.civo.com/docs/kubernetes/create-a-cluster#creating-a-cluster-using-civo-cli), and create a Kubernetes cluster using 2 nodes, 4 CPUs, 8 GB RAM.\n\n```shell\ncivo kubernetes create remote-dev -n 2 -s g4s.kube.large\n\ncivo kubernetes config remote-dev --save\nkubectl config use-context remote-dev\n```\n\nYou have full permissions on the cluster to create ClusterRoleBindings. 
The [default Storage Class](https://www.civo.com/docs/kubernetes/kubernetes-volumes#creating-a-persistent-volume-claim-pvc) is set to 'civo-volume'.\n\nInstall the [Nginx Ingress controller using Helm](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nhelm upgrade --install ingress-nginx ingress-nginx \\\n  --repo https://kubernetes.github.io/ingress-nginx \\\n  --namespace ingress-nginx --create-namespace\n\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ncivo kubernetes show remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with self-managed Kubernetes\nThe process follows similar steps, requiring a user with permission to create `ClusterRoleBinding` resources. The [Nginx Ingress controller](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start) is the fastest path forward. Once the cluster is ready, print the load balancer IP for the DNS records, and create/update A/AAAA record for wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) pointing to the load balancer IP.\n\n## Workspaces proxy installation into Kubernetes\n_After completing the Kubernetes cluster setup with one of your preferred providers, please continue with the next steps._\n\nAdd the Helm repository for the workspaces proxy (it is using the [Helm charts feature in the GitLab package registry](https://docs.gitlab.com/ee/user/packages/helm_repository/)).\n\n```shell\nhelm repo add gitlab-workspaces-proxy \\\n  https://gitlab.com/api/v4/projects/gitlab-org%2fremote-development%2fgitlab-workspaces-proxy/packages/helm/devel\n```\n\nInstall the gitlab-workspaces-proxy, and optionally [specify the most current chart version](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy/-/blob/main/helm/Chart.yaml). 
If you are using a different ingress controller than Nginx, you need to change the `ingress.className` key. Re-run the command when new TLS certificates need to be installed.\n\n```shell\nhelm repo update\n\nhelm upgrade --install gitlab-workspaces-proxy \\\n  gitlab-workspaces-proxy/gitlab-workspaces-proxy \\\n  --version 0.1.6 \\\n  --namespace=gitlab-workspaces \\\n  --create-namespace \\\n  --set=\"auth.client_id=${CLIENT_ID}\" \\\n  --set=\"auth.client_secret=${CLIENT_SECRET}\" \\\n  --set=\"auth.host=${GITLAB_URL}\" \\\n  --set=\"auth.redirect_uri=${REDIRECT_URI}\" \\\n  --set=\"auth.signing_key=${SIGNING_KEY}\" \\\n  --set=\"ingress.host.workspaceDomain=${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  --set=\"ingress.host.wildcardDomain=${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  --set=\"ingress.tls.workspaceDomainCert=$(cat ${WORKSPACES_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.workspaceDomainKey=$(cat ${WORKSPACES_DOMAIN_KEY})\" \\\n  --set=\"ingress.tls.wildcardDomainCert=$(cat ${WILDCARD_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.wildcardDomainKey=$(cat ${WILDCARD_DOMAIN_KEY})\" \\\n  --set=\"ingress.className=nginx\"\n```\n\nThe chart installs and configures the ingress automatically. You can verify the setup by getting the `Ingress` resource type:\n\n```shell\nkubectl get ingress -n gitlab-workspaces\n\nNAME                      CLASS   HOSTS                             ADDRESS   PORTS     AGE\ngitlab-workspaces-proxy   nginx   remote-dev.dev,*.remote-dev.dev             80, 443   9s\n```\n\n### Agent for Kubernetes installation\nCreate the agent configuration file in `.gitlab/agents/\u003Cagentname>/config.yaml`, add to git, and push it into the repository. The `remote_development` key specifies the `dns_zone`, which must be set to the workspaces domain. Additionally, the integration needs to be enabled. 
The `observability` key intentionally configures [debug logging](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) for the first setup to troubleshoot faster. You can adjust the `logging` levels for production usage.\n\n```shell\nexport GL_AGENT_K8S=remote-dev-dev\n\n$ mkdir agent-kubernetes && cd agent-kubernetes\n$ mkdir -p .gitlab/agents/${GL_AGENT_K8S}/\n\n$ cat \u003C\u003CEOF >.gitlab/agents/${GL_AGENT_K8S}/config.yaml\nremote_development:\n    enabled: true\n    dns_zone: \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\"\n\nobservability:\n  logging:\n    level: debug\n    grpc_level: warn\nEOF\n\n$ git add .gitlab/agents/${GL_AGENT_K8S}/config.yaml\n$ git commit -avm \"Add agent for Kubernetes configuration\"\n# adjust the URL to your GitLab server URL and project path\n$ git remote add origin https://gitlab.example.com/remote-dev-workspaces/agent-kubernetes.git\n# will create a private project when https/PAT is used\n$ git push\n```\n\nOpen the GitLab project in your browser, navigate into `Operate > Kubernetes Clusters`, and click the `Connect a new cluster (agent)` button. Select the agent from the configuration dropdown, and click `Register`. The form generates a ready-to-use Helm chart CLI command. Similar to the command below, replace `XXXXXXXXXXREPLACEME` with the actual token value.\n\n```shell\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\nhelm upgrade --install remote-dev-dev gitlab/gitlab-agent \\\n    --namespace gitlab-agent-remote-dev-dev \\\n    --create-namespace \\\n    --set image.tag=v16.0.1 \\\n    --set config.token=XXXXXXXXXXREPLACEME \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nRun the commands, and verify that the agent is connected in the `Operate > Kubernetes Clusters` overview. 
You can access the pod logs using the following command:\n\n```shell\n$ kubectl get ns\nNAME                          STATUS   AGE\ngitlab-agent-remote-dev-dev   Active   9d\ngitlab-workspaces             Active   22d\n...\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n```\n\n_Congrats! Your infrastructure setup for on-demand, cloud-based development environments is complete._\n\n## Workspaces creation\nAfter completing the infrastructure setup, you must verify that all components work together and users can create workspaces. You can fork or import the [`example-python-http-simple` project](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple) into your GitLab group with access to the GitLab agent for Kubernetes to try it immediately. The project provides a simple Python web app with Flask that provides different HTTP routes. Alternatively, start with a new project and create a `.devfile.yaml` with the [example configuration](https://docs.gitlab.com/ee/user/workspace/#example-configurations).\n\nOptional: Inspect the [`.devfile.yaml`](https://docs.gitlab.com/ee/user/workspace/#devfile) file to learn about the configuration format. We will look into the `image` key later.\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: py\n    attributes:\n      gl/inject-editor: true\n    container:\n      # Use a custom image that supports arbitrary user IDs.\n      # NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. 
DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n      # Source: https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id\n      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n        - name: http-python\n          targetPort: 8080\n```\n\n### Create the first workspaces\nNavigate to the `Your Work > Workspaces` menu and create a new workspace. Search for the project name, select the agent for Kubernetes, and create the workspace.\n\n![GitLab remote development workspaces, Python example](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python.png){: .shadow}\n\nOpen two terminals to follow the workspaces proxy and agent logs in the Kubernetes cluster.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n\n{\"level\":\"info\",\"ts\":1686331102.886607,\"caller\":\"server/server.go:74\",\"msg\":\"Starting proxy server...\"}\n{\"level\":\"info\",\"ts\":1686331133.146862,\"caller\":\"upstream/tracker.go:47\",\"msg\":\"New upstream added\",\"host\":\"8080-workspace-62029-5534214-2vxdxq.remote-dev.dev\",\"backend\":\"workspace-62029-5534214-2vxdxq.gl-rd-ns-62029-5534214-2vxdxq\",\"backend_port\":8080}\n2023/06/09 17:21:10 getHostnameFromState state=https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/folder=/projects/demo-python-http-simple\n```\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.839Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Pending\\\", Identifier: 
\\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy-gl-workspace-data__PersistentVolumeClaim\\\" }\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.866Z\",\"msg\":\"Received update event\",\"mod_name\":\"remote_development\",\"workspace_namespace\":\"gl-rd-ns-62029-5534214-k66cjy\",\"workspace_name\":\"workspace-62029-5534214-k66cjy\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:43.627Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Successful\\\", Identifier: \\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy_apps_Deployment\\\" }\",\"agent_id\":62029}\n```\n\nWait until the workspace is provisioned successfully, and click to open the HTTP URL, example format `https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/?folder=%2Fprojects%2Fexample-python-http-simple`. The GitLab OAuth application will ask you for authorization.\n\n![GitLab OAuth provider app, example with the Developer Evangelism demo environment](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app.png){: .shadow}\n\nYou can select the Web IDE menu, open a new terminal (`cmd shift p` and search for `terminal create`). More shortcuts and Web IDE usage are documented [here](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n![GitLab remote development workspaces, Python example, create terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_create_terminal.png){: .shadow}\n\nUsing the Python example project, try to run the `hello.py` file with the Python interpreter after changing the terminal to `bash` to access auto-completion and shell history. 
Type `pyth`, press tab, type `hel`, press tab, enter.\n\n```shell\n$ bash\n\n$ python hello.py\n```\n\nThe command will fail because the Python requirements still need to be installed. Let us fix that by running the following command:\n\n```shell\n$ pip install -r requirements.txt\n```\n\n![GitLab remote development workspaces, Python example, install requirements in the terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_install_pip.png){: .shadow}\n\n**Note**: This example is intentionally kept simple, and does not use best practices with `pyenv` for managing Python environments. We will explore development environment templates in future blog posts.\n\nRun the Python application `hello.py` again to start the web server on port 8080.\n\n```shell\n$ python hello.py\n```\n\nYou can access the exposed port by modifying the URL from the default port at the beginning of the URL to the exposed port `8080`. The `?folder` URL parameter can also be removed.\n\n```diff\n-https://60001-workspace-62029-5534214-kbtcmq.remote-dev.dev/?folder=/projects/example-python-http-simple\n+https://8080-workspace-62029-5534214-kbtcmq.remote-dev.dev/\n```\n\nThe URL is not publicly available and requires access through the GitLab OAuth session.\n\n![GitLab remote development workspaces, Python example, run webserver, access HTTP](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_run_webserver_access_http.png){: .shadow}\n\nModifying the workspace requires custom container images supporting to run with [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). The example project uses a custom image which allows to install Python dependencies and create build artifacts. It also allows to use the bash terminal shown above. 
Learn more about custom image creation in the next section.\n\n### Custom workspace container images\nCustom container images require support for [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). You can build custom container images with [GitLab CI/CD](/solutions/continuous-integration/) and use the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/) to distribute the container images on the DevSecOps platform.\n\nWorkspaces run with arbitrary user IDs in the Kubernetes cluster containers and manage resource access with Linux group permissions. Existing container images may need to be changed, and imported as base image for new container images. The [following example](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile) uses the `python:3.11-slim-bullseye` image from Docker Hub as a base container image in the `FROM` key. The next steps create and set a home directory in `/home/gitlab-workspaces`, and manage user and group access to specified directories. Additionally, you can install more convenience tools and configurations into the image, for example the `git` package.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```\n# Example demo for a Python-based container image.\n# NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n\nFROM python:3.11-slim-bullseye\n\n# User id for build time. Runtime will be an arbitrary random ID.\nRUN useradd -l -u 33333 -G sudo -md /home/gitlab-workspaces -s /bin/bash -p gitlab-workspaces gitlab-workspaces\n\nENV HOME=/home/gitlab-workspaces\n\nWORKDIR $HOME\n\nRUN mkdir -p /home/gitlab-workspaces && chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home\n\n# TODO: Add more convenience tools into the user home directory, i.e. 
enable color prompt for the terminal, install pyenv to manage Python environments, etc\nRUN apt update && \\\n    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\nUSER gitlab-workspaces\n```\n\n **As an exercise**, [fork the project](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id) and modify the package installation step in the `Dockerfile` file to install the `dnsutils` package on the Debian based image to get access to the `dig` command.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```diff\n-RUN apt update && \\\n-    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n-    rm -rf /var/lib/apt/lists/*\n+RUN apt update && \\\n+    apt -y --no-install-recommends install git procps findutils htop vim curl wget dnsutils && \\\n+    rm -rf /var/lib/apt/lists/*\n```\n\n[Build the container image](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) with your preferred CI/CD workflow. On GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` template which takes care of building the image.\n\n```yaml\ninclude:\n    - template: Docker.gitlab-ci.yml\n```\n\nWhen building the container images manually, use Linux and `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594). Also, review the [optimizing images guide in the documentation](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html#optimize-docker-images) when creating custom container images to optimize size and build times.\n\nNavigate into `Deploy > Container Registry` in the GitLab UI and copy the image URL from the tagged image. 
Open the `.devfile.yaml` file in the forked GitLab project `example-python-http-simple`, and change the `image` path to the newly built image URL.\n\n[`.devfile.yaml`](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple/-/blob/main/.devfile.yaml)\n```diff\n-      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n+      image: registry.gitlab.example.com/remote-dev-workspaces/python-remote-dev-workspaces-user-id:latest\n```\n\nNavigate into `Your Work > Workspaces` and create a new workspace for the project, and try to execute the `dig` command to query the IPv6 address of GitLab.com (or any other internal domain).\n\n```shell\n$ dig +short gitlab.com AAAA\n```\n\nThe custom container image project is located [here](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/).\n\n## Tips\nThis blog post's setup steps with environment variables are easy to follow. 
For production usage, use automation to manage your environment with Terraform, Ansible, etc.\n\n- Terraform: [Provision a GKE Cluster (Google Cloud)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/gke), [Provision an EKS Cluster (AWS)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/eks), [Provision an AKS Cluster (Azure)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/aks), [Deploy Applications with the Helm Provider](https://developer.hashicorp.com/terraform/tutorials/kubernetes/helm-provider)\n- Ansible: [google.cloud.gcp_container_cluster module](https://docs.ansible.com/ansible/latest/collections/google/cloud/gcp_container_cluster_module.html), [community.aws.eks_cluster module](https://docs.ansible.com/ansible/latest/collections/community/aws/eks_cluster_module.html), [azure.azcollection.azure_rm_aks module](https://docs.ansible.com/ansible/latest/collections/azure/azcollection/azure_rm_aks_module.html), [kubernetes.core collection](https://docs.ansible.com/ansible/latest/collections/kubernetes/core/index.html#plugin-index)\n\n### Certificate management\nThe workspaces domain requires a valid TLS certificate. The examples above used certbot with Let's Encrypt, requiring a certificate renewal after three months. Depending on your corporate requirements, you may need to create TLS certificates signed by the corporate CA identity and manage the certificates. Alternatively, you can look into solutions like [cert-manager for Kubernetes](https://cert-manager.io/docs/getting-started/) that will help renew certificates automatically.\n\nDo not forget to add TLS certificate validity monitoring to avoid unforeseen errors. 
The [blackbox exporter for Prometheus](https://github.com/prometheus/blackbox_exporter) can help with monitoring TLS certificate expiry and send alerts.\n\n### Troubleshooting\nHere are a few tips for troubleshooting connections and inspecting the cluster resources.\n\n#### Verify the connections\nTry to connect to the workspaces domain to see whether the Kubernetes Ingress controller responds to HTTP requests.\n\n```shell\n$ curl -vL ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\nInspect the logs of the proxy deployment to follow connection requests. Since the proxy requires an authorization token sent via the OAuth app, an HTTP 400 error is expected for unauthenticated curl requests.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n```\n\nCheck if the TLS certificate is valid. You can also use `sslcan` and other tools.\n\n```shell\n$ openssl s_client -connect ${GITLAB_WORKSPACES_PROXY_DOMAIN}:443\n\n$ sslcan ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\n[Debug the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) and inspect the pod logs.\n\n```shell\n$ kubectl get ns\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-\u003CNAMESPACENAME>\n```\n\n#### Workspaces cannot be created even if the agent is connected\nWhen the workspaces deployment is spinning and nothing happens, try restarting the workspaces proxy and agent for Kubernetes. This is a known problem and tracked [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/414399#note_1426652421).\n\n```shell\n$ kubectl rollout restart deployment -n gitlab-workspaces\n\n$ kubectl rollout restart deployment -n gitlab-agent-$GL_AGENT_K8S\n```\n\nIf the agent for Kubernetes remains unresponsive, consider a complete reinstall. 
First, navigate into the GitLab UI into `Operate > Kubernetes Clusters` and [delete the agent](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#remove-an-agent-through-the-gitlab-ui). Next, use the following commands to delete the Helm release from the cluster, and run the installation command generated from the UI again.\n\n```shell\nkubectl get ns\nhelm list -A\n\nexport RELEASENAME=xxx\nexport NAMESPACENAME=xxx\nexport TOKEN=XXXXXXXXXXREPLACEME\nhelm uninstall ${RELEASENAME} -n gitlab-agent-${NAMESPACENAME}\n\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\n\nhelm upgrade --install ${RELEASENAME} gitlab/gitlab-agent \\\n    --namespace gitlab-agent-${NAMESPACENAME} \\\n    --create-namespace \\\n    --set image.tag=v16.1.2 \\\n    --set config.token=${TOKEN} \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nExample: `helm uninstall remote-dev-dev -n gitlab-agent-remote-dev-dev`\n\n#### Cannot modify workspace using custom images\nIf you cannot modify the workspace, open a new terminal and check the user id and their groups.\n\n```shell\n$ id\n```\n\nInspect the `.devfile.yaml` file in the project and extract the `image` attribute to test the used container image. You can use container CLI, for example `docker` that runs the container with a different user ID. Note: You can use any user ID to test the behavior.\n\nTip: Use grep and cut commands to extract the image attribute URL from the `.devfile.yaml`.\n\n```shell\n$ cat .devfile.yaml | grep image: | cut -f2 -d ':')\n```\n\nRun the following command to execute the `id` command in the container, and print the user information.\n\n```\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname id\n```\n\nTry to modify the workspace by running the command `echo 'Hi' >> ~/example.md`. 
This can fail with a permission error.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname echo 'Hi' >> ~/example.md\n```\n\nIf the above command failed, the Linux user group does not have enough permissions to modify the file. You can view the permissions using the `ls` command.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname ls -lart ~/\n```\n\n### Contribute\nThe [remote development developer documentation](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs) provides insights into the [architecture blueprint](https://docs.gitlab.com/ee/architecture/blueprints/remote_development/) and how to set up a local development environment to [start contributing](/community/contribute/). In the future, we will be able to use remote development workspaces to develop remote development workspaces.\n\n## Share your feedback\nIn this blog post, you have learned how to manage the infrastructure for remote development workspaces, create your first workspace, and more tips on custom workspace images and troubleshooting. Using the same development environment across organizations and communities, developers can focus on writing code and get fast preview feedback (i.e., by running a web server that can be accessed externally in the remote workspace). Providing the same reproducible environment also helps opensource contributors to reproduce bugs and provide feedback most efficiently. They can use the same best practices as upstream maintainers.\n\nDevelopers and DevOps engineers will be using the Web IDE in workspaces. 
Later, being able to [connect their desktop client to workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10478), they can take advantage of even more efficiency with the [most comprehensive AI-powered DevSecOps platform](/gitlab-duo/): Code suggestions and more AI-powered workflows are just one fingertip away.\n\nWhat will your teams build with remote development workspaces? Please share your experiences in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031), blog about your setup, and join our [community forum](https://forum.gitlab.com/) for more discussions.\n\nCover image by [Nick Karvounis](https://unsplash.com/@nickkarvounis) on [Unsplash](https://unsplash.com/photos/SmIM3m8f3Pw)",[495,2744,722,9,1925],{"slug":3443,"featured":6,"template":699},"set-up-infrastructure-for-cloud-development-environments","content:en-us:blog:set-up-infrastructure-for-cloud-development-environments.yml","Set Up Infrastructure For Cloud Development Environments","en-us/blog/set-up-infrastructure-for-cloud-development-environments.yml","en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"_path":3449,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3450,"content":3455,"config":3461,"_id":3463,"_type":13,"title":3464,"_source":15,"_file":3465,"_stem":3466,"_extension":18},"/en-us/blog/sharing-slis-across-departments",{"title":3451,"description":3452,"ogTitle":3451,"ogDescription":3452,"noIndex":6,"ogImage":1086,"ogUrl":3453,"ogSiteName":685,"ogType":686,"canonicalUrls":3453,"schema":3454},"How we share SLIs across engineering departments","The Scalability team engages with the Development department for collaborating on SLIs. 
The first post in this series explains how we made available information accessible for development groups.","https://about.gitlab.com/blog/sharing-slis-across-departments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we share SLIs across engineering departments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bob Van Landuyt\"}],\n        \"datePublished\": \"2022-03-10\",\n      }",{"title":3451,"description":3452,"authors":3456,"heroImage":1086,"date":3458,"body":3459,"category":832,"tags":3460},[3457],"Bob Van Landuyt","2022-03-10","\nAt GitLab everyone can contribute to GitLab.com's availability. We\nmeasure the availability using several Service Level Indicators (SLIs)\nBut it's not always easy to see how the features you're building are\nperforming. GitLab's features are divided amongst development groups,\nand every group has [their own dashboard](https://docs.gitlab.com/ee/development/stage_group_observability/index.html)\ndisplaying an availability score.\n\n![Stage group availability](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-code_review_availability.png)\n\nWhen a group's availability goes below 99.95%, we work with the group\non figuring out why that is and how we can improve the performance or\nreliability of the features that caused their number to drop. 
The\n99.95% service level objective (SLO) is the same target the\ninfrastructure department has set for\n[GitLab.com availability](/handbook/engineering/infrastructure/performance-indicators/#gitlabcom-availability).\n\nBy providing specific data about how features perform on our production systems, it has become easier to recognize when it is important to prioritize performance and availability work.\n\n## Service availability on GitLab.com\n\nOur infrastructure is separated into multiple services, handling\ndifferent kinds of traffic but running the same monolithic Rails\napplication. Not all features have a similar usage pattern. For\nexample, on the service handling web requests for GitLab.com we see a\nlot more requests related to `code_review` or `team_planning` than we\ndo related to `source_code_management`. It's important that we\nlook at these in isolation as well as a service aggregate.\n\nThere's nobody who knows better how to interpret these numbers in\nfeature aggregations than the people who build these features.\n\nThis number is sourced by the same SLIs that we use to monitor\nGitLab.com's availability. We calculate this by dividing the number of\nsuccessful measurements by the total number of measurements over the\npast 28 days. A measurement could be several things, most commonly a\nrequest handled by our Rails application or a background job.\n\n## Monitoring feature and service availability\n\nFor monitoring GitLab.com we have Grafana dashboards, generated using\n[Grafonnet](https://grafana.github.io/grafonnet-lib/), that show these\nsource metrics in several dimensions. 
For example, these are error\nrates of our monolithic Rails application, separated by feature:\n\n![Puma SLI by feature](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-puma_sli_per_feature.png)\n\nWe also generate [multiwindow, multi-burn-rate alerts](https://sre.google/workbook/alerting-on-slos/#short_and_long_windows_for_alerting)\nas defined in Google's SRE workbook.\n\n![Puma SLI error rate and requests per second](https://about.gitlab.com/images/blogimages/2022-02-share-infrastructure-slis/2022-02-23-puma_sli.png)\n\nThe red lines represent alerting thresholds for a burn rate. The\nthin threshold means we'll alert if the SLI has spent more than 5%\nof its monthly error budget in the past 6 hours. The thicker\nthreshold means we'll alert when the SLI has not met SLO for more than\n2% of measurements in the past hour.\n\nBecause both GitLab.com's availability number and the availability\nnumber for development groups are sourced by the same metrics, we\ncan provide similar alerts and graphs tailored to the\ndevelopment groups. Features with a relatively low amount of traffic would not easily show\nproblems in our bigger service aggregations. 
With this mechanism we can see those problems\nand put them on the radar of the teams building those features.\n\n## Building and adoption\n\nIn upcoming posts, we will talk about how we built this tooling and how we worked with other teams to have this adopted into the product prioritization process.\n\n## Related content\n\n- [Our project to provide more detailed data on the stage group dashboards](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/664)\n- [Development documentation for how to change dashboard content](https://docs.gitlab.com/ee/development/stage_group_observability/index.html)\n",[743,1074,9,696],{"slug":3462,"featured":6,"template":699},"sharing-slis-across-departments","content:en-us:blog:sharing-slis-across-departments.yml","Sharing Slis Across Departments","en-us/blog/sharing-slis-across-departments.yml","en-us/blog/sharing-slis-across-departments",{"_path":3468,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3469,"content":3475,"config":3479,"_id":3481,"_type":13,"title":3482,"_source":15,"_file":3483,"_stem":3484,"_extension":18},"/en-us/blog/simple-trick-for-smaller-screenshots",{"title":3470,"description":3471,"ogTitle":3470,"ogDescription":3471,"noIndex":6,"ogImage":3472,"ogUrl":3473,"ogSiteName":685,"ogType":686,"canonicalUrls":3473,"schema":3474},"One simple trick to make your screenshots 80% smaller","How to compress your screenshots automatically with pngquant and zopfli","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/simple-trick-for-smaller-screenshots","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"One simple trick to make your screenshots 80% smaller\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"James Ramsay\"}],\n        \"datePublished\": \"2020-01-30\",\n      
}",{"title":3470,"description":3471,"authors":3476,"heroImage":3472,"date":971,"body":3477,"category":1239,"tags":3478},[1298],"**Updated 2020-02-03:** Added macOS Automator instructions.\n\n\nI take screenshots every day to share with others in issues, blog posts,\nemail, and\n\nSlack. I like them to be crisp, high resolution, and importantly the image\nfile size\n\nshould be as small as possible. Keeping the file size small means they are\nboth\n\nfast to upload and to download. This is particularly important when I am\n\nwriting a blog post or documentation.\n\n\nBelow is a quick primer on PNG compression, and instructions for completely\n\nautomating the process.\n\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/_E1f0xXDU3g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\nWhen you capture a screenshot on your Mac, the image will be saved in the\nPNG-32\n\nformat, with support for 16 million distinct colors and transparency. This\nmeans\n\nthat the screenshot will perfectly capture every pixel on your screen, but\n\nhaving four 8-bit channels for red, green, blue and alpha (transparency) for\n\nevery pixel makes the file very large. If you're interested, you can verify\nthis\n\nyourself using [pngcheck](http://www.libpng.org/pub/png/apps/pngcheck.html).\n\n\nIn practice, the subjects of my screenshots are buttons and forms, not\n\nphotographs.  While you might want 16 million colors for photos, we don't\nneed them for screenshots, so we can take advantage of the PNG-8 format with\nits more compact 256 color palette.\n\n\n## Lossy Compression: Color Quantization\n\n\nThe first step is to reduce the color palette of the screenshot. This is a\ntype\n\nof lossy compression called [color\n\nquantization](https://en.wikipedia.org/wiki/Color_quantization), which will\n\nreduce the number of distinct colors in the image. 
The\n\n[pngquant](https://pngquant.org/) command line utility is the perfect tool\nfor\n\nthis job, and if you've used the popular [ImageAlpha](https://pngmini.com/)\n\ntool, you've already used the pngquant library.\n\n\n```bash\n\n# Install pngquant using Homebrew\n\nbrew install pngquant\n\n\n# Quantize 32-bit RGBA PNG to 8-bit (or smaller) RGBA-palette\n\n# pngquant [number of colors] [options] input.png\n\n#   --skip-if-larger  only save converted file if they're smaller than\noriginal\n\n#   --strip           remove optional metadata\n\n#   --ext=.png        set output filename to be same as input filename\n\n#   --force           overwrite existing output files\n\npngquant 256 --skip-if-larger --strip --ext=.png --force example.png\n\n```\n\n\nThe screenshots below illustrate different levels of palette size reduction.\n\n\n|PNG-32 (134KB)|256 colors (42KB)|128 colors (39KB)|64 colors (38KB)|\n\n|---|---|---|---|\n\n|[![Source\nPNG-32](https://about.gitlab.com/images/blogimages/png-compression/example.raw.png)](/images/blogimages/png-compression/example.raw.png)|[![256\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.256.png)](/images/blogimages/png-compression/example.256.png)|[![128\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.128.png)](/images/blogimages/png-compression/example.128.png)|[![64\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.64.png)](/images/blogimages/png-compression/example.64.png)|\n\n\n|32 colors (37KB)|16 colors (29KB)|8 colors (22KB)|4 colors 
(16KB)|\n\n|---|---|---|---|\n\n|[![32\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.32.png)](/images/blogimages/png-compression/example.32.png)|[![16\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.16.png)](/images/blogimages/png-compression/example.16.png)|[![8\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.8.png)](/images/blogimages/png-compression/example.8.png)|[![4\ncolors](https://about.gitlab.com/images/blogimages/png-compression/example.4.png)](/images/blogimages/png-compression/example.4.png)|\n\n\nI've observed for most screenshots you can comfortably reduce the color\npalette\n\nto as few as 64 colors before the difference in image quality becomes\nnoticeable. If you frequently take\n\nscreenshots of gradients or more complex images, you may want to stick with\n256\n\ncolors to avoid noticeable artifacts.\n\n\n## Lossless Compression: DEFLATE\n\n\nThe PNG image file format uses\n[DEFLATE](https://en.wikipedia.org/wiki/DEFLATE)\n\ncompression internally for an added layer of lossless compression, but most\nPNG\n\nlibraries do not implement aggressive lossless compression. This provides\n\nanother opportunity to reduce the file size further.\n\n\nIn 2013, Google released [zopfli](https://github.com/google/zopfli), which\n\nclaimed to **improve compression by 3-8% compared to `zlib`**. The trade off\nfor\n\nthis improvement: waiting an extra 1-2 seconds. 
(There is no decompression\n\npenalty when viewing the compressed image).\n\n\n```bash\n\n# Install zopfli using Homebrew, which includes zopflipng\n\nbrew install zopfli\n\n\n# Optimize PNG compression\n\n# zopflipng [options] input.png output.png\n\n#   -y  do not ask about overwriting files\n\nzopflipng -y example.png example.png\n\n```\n\n\nRelative to the massive savings from color quantization, improving lossless\n\ncompression provides a much smaller reduction, but in the context of pages\nwith\n\nmany images these marginal gains do add up to worthwhile savings.\n\n\n![Chart: file size\nsavings](https://about.gitlab.com/images/blogimages/png-compression/chart.png)\n\n\n## Automation\n\n\nThe trick is to make this happen automatically every time I capture a\nscreenshot\n\nusing [Hazel](https://www.noodlesoft.com/) or\n\n[Automator](https://support.apple.com/en-au/guide/automator/welcome/mac).\nThis\n\nallows you to run commands based on file events, like every time a new\n\nscreenshot is added to a directory.\n\n\nA bonus trick, is to create a dedicated Screenshots directory, so that they\n\ndon't clutter your desktop. 
This is also easy:\n\n\n```bash\n\n# Create a Screenshots directory in the current users Home directory\n\nmkdir -p \"$HOME/Screenshots\"\n\n\n# Configure macOS to capture screenshots to this location\n\n# If you want to revert this change, and save screenshots to your desktop,\n\n# instead use \"$HOME/Desktop\"\n\ndefaults write com.apple.screencapture location \"$HOME/Screenshots\"\n\n```\n\n\nUsing Hazel, add the Screenshots folder where newly captured screenshots\nwill be\n\ncreated, and create a new rule to compress files when they are added.\nCombining\n\nthe commands above, and using `$1` syntax to access the filename argument\npassed\n\nby Hazel, the final script is:\n\n\n```bash\n\npngquant 64 --skip-if-larger --strip --ext=.png --force \"$1\"\n\nzopflipng -y \"$1\" \"$1\"\n\n```\n\n\nAlternatively, using Automator, create a new **Folder Action** that receives\n\nnotifications from the Screenshots folder. Add a **Run Shell Script** block,\nand\n\nmake sure to **Pass input as arguments**. 
Combining the commands above, and\nthis\n\ntime using `$@` syntax to handle multiple arguments, and absolute paths for\n\npngquant and zopflipng, the final script is:\n\n\n```bash\n\nfor f in \"$@\"\n\ndo\n  /usr/local/bin/pngquant 64 --skip-if-larger --strip --ext=.png --force \"$f\"\n  /usr/local/bin/zopflipng -y \"$f\" \"$f\"\ndone\n\n```\n\n\nHere's a screenshot of my configuration.\n\n\n| Hazel | Automator |\n\n|---|---|\n\n| [![Hazel Screenshot Compression\nRule](https://about.gitlab.com/images/blogimages/png-compression/hazel.png)](/images/blogimages/png-compression/hazel.png)\n| [![Automator Screenshot Compression\nAction](https://about.gitlab.com/images/blogimages/png-compression/automator.png)](/images/blogimages/png-compression/automator.png)\n|\n\n\nMy final trick, is to add the Screenshots folder to my Dock for easy access.\n\nThis is achieved by dragging the Screenshots folder from Finder to your\nDock.\n\n\n## Summary\n\n\nThe PNG file format is great for screenshots, but the defaults output is too\n\nlarge for sharing on the internet. 
Instead of using ImageAlpha and\nImageOptim to\n\ncompress your screenshots by hand, you can use Hazel to automate this to\n\nregularly yield file size reductions of 80%.\n\n\nIf you know of compression tricks, or alternatives that work on Windows or\n\nLinux, let me know below in the comments!\n\n\nCover image by [Emmy Smith](https://unsplash.com/@emsmith) on\n[Unsplash](https://unsplash.com/photos/LEjEst7lLfU)\n",[9],{"slug":3480,"featured":6,"template":699},"simple-trick-for-smaller-screenshots","content:en-us:blog:simple-trick-for-smaller-screenshots.yml","Simple Trick For Smaller Screenshots","en-us/blog/simple-trick-for-smaller-screenshots.yml","en-us/blog/simple-trick-for-smaller-screenshots",{"_path":3486,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3487,"content":3493,"config":3498,"_id":3500,"_type":13,"title":3488,"_source":15,"_file":3501,"_stem":3502,"_extension":18},"/en-us/blog/situational-leadership-strategy",{"title":3488,"description":3489,"ogTitle":3488,"ogDescription":3489,"noIndex":6,"ogImage":3490,"ogUrl":3491,"ogSiteName":685,"ogType":686,"canonicalUrls":3491,"schema":3492},"Situational Leadership Strategy","GitLab CEO Sid Sijbrandij shares how he incorporates situational leadership in his management style.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679453/Blog/Hero%20Images/remote-work.png","https://about.gitlab.com/blog/situational-leadership-strategy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Situational Leadership Strategy\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2021-11-19\",\n      }",{"title":3488,"description":3489,"authors":3494,"heroImage":3490,"date":3495,"body":3496,"category":718,"tags":3497},[1463],"2021-11-19","\n \n[Situational Leadership Theory](https://situational.com/blog/the-four-leadership-styles-of-situational-leadership/) is a model 
created by Paul Hersey and Ken Blanchard in 1969. It describes  a leadership style that is adapted to a direct report depending on the unique individual or situation, with no\none style being better than another.\n \nHersey and Blanchard grouped leadership styles into four behaviors:\n \n* **Telling:** The report lacks the skills required to do the job, but is willing to work at it.\n* **Selling:** The report is capable of performing, but is unwilling to do the task.\n* **Participating:** The report is experienced in performing the task, but not confident.\n* **Delegating:** The report is experienced, confident, and takes ownership of the task.\n \nDepending on the individual and the task at hand, it’s necessary to adapt your leadership approach in order to be\nthe most effective leader possible.\n \nI have built on top of this model as I adapt my leadership style based on specific circumstances.\n \nThe following factors inform my approach to managing an individual in a specific situation:\n \n1. **Experience level**: What is the experience level of the report?\n1. **Skills required**: What skills are required to perform the task?\n1. **My own skill**: What skills do I have to perform the task? Should I delegate my weaknesses or strengths?\n1. **Task importance**: What is the importance and priority of the task?\n1. **Task urgency**: How quickly do we need to complete the task?\n1. **Opportunities to provide feedback**: What opportunities are there to provide feedback? Should the feedback be in a group setting or in a 1-1?\n1. **Learning opportunities**: Are others able to learn from doing the task? Does a group setting or live stream help others learn?\n1. **Reporting relationship**: Are they a direct or indirect report? Are they external to the company?\n1. **Time available**: How much time does the report have to perform the task? What is their capacity?\n1. **Time needed**: How much time would it take me to perform the task?\n1. 
**Current solution**: What is the shortfall of the current solution?\n1. **My emotion**: How much does the shortfall bother me?\n1. **Feedback effort**: How much effort do I need to invest in order to give the feedback?\n1. **Feedback allocation**: How much time is available to provide feedback?\n1. **Previous feedback**: What feedback have they already received regarding the task? Have I already given feedback?\n1. **Team member’s state of mind**: How is the report feeling?\n1. **Metrics**: What data is available to the report as a means of automatic feedback?\n1. **Relationship duration**: How long do I expect to work with this person?\n1. **Resourcing needed**: What resources does the person need to complete the task? Do they have these resources available?\n \nThese are also not complete tradeoffs. A combination of any number of these factors help determine my approach. For example, I may choose to more heavily weight a team member’s state of mind if I know that they recently experienced a personal hardship and the task does not have great urgency--even if I have a high level of emotional engagement.\n \nIt’s important to note that while this list outlines key considerations that inform my management style, it doesn’t mean that I choose the most effective approach in a particular instance. 
\n \nFor more information on Situational Leadership and you can adapt your own leadership style, check out the book\n[_Management of Organizational Behavior_](https://www.amazon.com/Management-Organizational-Behavior-10th-Hersey/dp/0132556405) by Paul Hersey, Ken Blanchard, and Dewey Johnson.\n",[2744,696,9],{"slug":3499,"featured":6,"template":699},"situational-leadership-strategy","content:en-us:blog:situational-leadership-strategy.yml","en-us/blog/situational-leadership-strategy.yml","en-us/blog/situational-leadership-strategy",{"_path":3504,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3505,"content":3511,"config":3516,"_id":3518,"_type":13,"title":3519,"_source":15,"_file":3520,"_stem":3521,"_extension":18},"/en-us/blog/software-test-at-gitlab",{"title":3506,"description":3507,"ogTitle":3506,"ogDescription":3507,"noIndex":6,"ogImage":3508,"ogUrl":3509,"ogSiteName":685,"ogType":686,"canonicalUrls":3509,"schema":3510},"An inside look at software testing at GitLab","Director of quality engineering Mek Stittri talks test technology and the future of automation at GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680800/Blog/Hero%20Images/softwaretestlaunch.jpg","https://about.gitlab.com/blog/software-test-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"An inside look at software testing at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-08-30\",\n      }",{"title":3506,"description":3507,"authors":3512,"heroImage":3508,"date":3513,"body":3514,"category":718,"tags":3515},[2836],"2019-08-30","\n\n_In our [just-released survey of over 4,000 developers, security\nprofessionals, and operations team members](/developer-survey/), there was one thing everyone agreed on: 50% of each group\nsaid software testing is the biggest reason why development is delayed. 
Testers have long\nbeen the underdogs in the SDLC and that viewpoint is apparently very slow to change.\nTo understand what’s really going on, and how things work at GitLab, we\nasked [Mek Stittri](/company/team/#mekdev), director of quality engineering, to share his\nperspective on what’s working with test today and what’s in need of improvement._\n\n## Why is test a continued DevOps problem?\n\nIt’s a two-part answer, Mek says. First, there are simply not enough tests run and second, the tests that are used are often flaky (meaning their results aren’t necessarily trustworthy).\n\nTackling the issue of not running enough tests, Mek says it’s an area GitLab is addressing. “At GitLab, I think we are better than other companies where developers write unit tests and integration tests every time a change goes in,” he says. “That is great, but that testing is at a lower level, and it doesn't really map to a business use case.” To write better tests a team needs test requirements, but there can be so many different sets of stakeholders that it can be tough to get their input about *test* requirements and not just feature requirements. “We are improving it here at GitLab where our VP of Product [Scott Williamson](https://gitlab.com/sfwgitlab) is doing a great job. We have a section for test requirements right now (in the issue and merge request templates). It's now a blank and free form for people to fill in, but it should be highlighted going forward as a required section taking input from product discovery and validation as a deliverable.”\n\nThe bottom line: the stakeholders who are delivering the code need to understand the end goal better. “Unit tests test code at a smaller scale, and that’s great, but it doesn’t really verify the functionality works end to end as a whole. 
We need more coverage and more understanding of what needs to be tested.”\n\n![The Apollo 11 launch framework](https://about.gitlab.com/images/blogimages/apollo11framework.png){: .shadow.small.center}\nApollo 11 is held up by a framework and software is no different.\n{: .note.text-center}\n\nMek likens this process to Apollo 11. Everyone is excited about the rocket (the software features, in other words) but no one pays attention to the red scaffolding on the right that’s actually holding the rocket up. “That’s the side that nobody looks at but it’s a lot of work,” he says. “It’s taller than the rocket. We need to build that platform to have adequate testing (functional, performance, etc).” The ideal situation to get a company there? Start building the test framework and add test coverage at the exact same time the product is being built. “You assemble it together, run it, it’s passing and we go for launch and it’s shipped. We’re not there yet. And I can assure you a lot of companies out there aren’t there yet either.”\n\n## About those flaky tests…\n\n“There are a lot of test automation engineers and test developers out there, but not all of them know how to write and design a good test,” Mek explains. Automated tests needs to function like a flow of self-retrying dominoes where if one step is not completed it needs to keep retrying to reach the next step. Tests need to mimic what a manual tester would do, he says. No manual tester is going to click on a button and then wait 10 minutes. The tester will click again, or try other strategies. “At GitLab [we put emphasis on test framework reliability](/handbook/engineering/quality/#test-framework-reliability-and-efficiency) and we treat each user workflow step like a piece of retrying dominoes. 
We need to make sure all the dominoes fall over so the workflow is completed,” Mek says.\n\n>We need more coverage and more understanding of what needs to be tested.\n\nSo companies need to think through how the tests work, but also test the right things. If that happens, quality can be everyone’s responsibility in the end, Mek says. “We want developers to contribute to the end-to-end test so you want to make a test framework that is easy to use and easy to read. I think this all factors in.” And Mek points out it really is in everyone’s best interests to think about quality first. “Let's make the process better so we work smarter, right? We achieve more without having to work weekends or get pinged during your family dinner. Nobody wants that.”\n\n## Test automation and machine learning\n\nTest automation is a cornerstone of successful [DevOps](/topics/devops/) but it remains difficult for many companies to achieve. Mek’s take: “We need to design the product such that the test automation framework can integrate into it well,” he says flatly. That requires good collaboration with development teams due to frontend UI locators and backend APIs that are the interfaces to enable better and stable test automation. “Go back to Apollo 11,” Mek says. “It's like the connections along the rocket's fuselage. I need to integrate with this to make sure things are working fine. The probes and sensors need to be there. So if those aren't there, then your test automation engineers need to code around these obstacles. It's not working smart.” In other words, the test automation framework should not take the longer route when executing user interactions to the application because this can be the source of unstable and in-efficient tests.\n\nOne step that can help companies – including GitLab – get there is [machine learning](https://medium.com/machine-learning-for-humans/why-machine-learning-matters-6164faf1df12). 
“We are having discussions here at GitLab about where we want a bot,” Mek says. “I think machine learning will come and help, but the input and output needs to be clearly defined so you have a clear implementation direction, TensorFlow, Linear Regression, or whatever techniques. You can write a bot that just lives in the product, meaning it looks at all the UI locators (dedicated to test automation) on a page and randomly clicks one of those links.” This GitLab bot of the future will work 24/7, clicking, clicking, clicking on the page until it errors out or runs into a 404, Mek says. The goal is to create a bot that is like a “menacing QA engineer” that can be programmed to keep banging on the problematic areas until everything is solved. To get there will require lots of data – machine learning literally needs to learn from data and experience – and although there are a handful of companies experimenting with this now, this is all still very early stage.\n\n## Where we’re headed with testing\n\nMek and his team hope to increase both quality and productivity this year which may be a bit of a balancing act, since more “quality” equals more testing which can result in a longer development cycle and perhaps reduced productivity (this is why we say test automation engineers are often unappreciated!). “My department is working this quarter to have a full suite of automated tests for our enterprise features. We want to have a big checkbox for the enterprise features every time we deploy. We need this because it is mapping to the business use case.” But Mek and team need to do all of that while shortening the test runtime for developers. “You want more test coverage but we need to keep the runtime low because we can’t have developers and release managers wait two hours.”\n\nThe plan is to add more runners, optimize them, de-duplicate some tests and make sure the process is as streamlined as it can be. 
“Right now it takes about an hour or so, but I would love to have it down to 30 minutes where we certify that this merge request going in checks all the boxes and all the enterprise features are not broken. We need to set ourselves an aggressive goal and I would say 30 minutes is a good first step.”\n\nCover image by [Kurt Cotoaga](https://unsplash.com/@kydroon) on [Unsplash](https://unsplash.com)\n{: .note}\n",[790,789,722,9],{"slug":3517,"featured":6,"template":699},"software-test-at-gitlab","content:en-us:blog:software-test-at-gitlab.yml","Software Test At Gitlab","en-us/blog/software-test-at-gitlab.yml","en-us/blog/software-test-at-gitlab",{"_path":3523,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3524,"content":3530,"config":3535,"_id":3537,"_type":13,"title":3538,"_source":15,"_file":3539,"_stem":3540,"_extension":18},"/en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd",{"title":3525,"description":3526,"ogTitle":3525,"ogDescription":3526,"noIndex":6,"ogImage":3527,"ogUrl":3528,"ogSiteName":685,"ogType":686,"canonicalUrls":3528,"schema":3529},"How GitLab CI helps solve common DevSecOps challenges","How single application continuous integration helps team automate and collaborate.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681305/Blog/Hero%20Images/ci-use-case-web-header.png","https://about.gitlab.com/blog/solve-devsecops-challenges-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab CI helps solve common DevSecOps challenges\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-05-12\",\n      }",{"title":3525,"description":3526,"authors":3531,"heroImage":3527,"date":3532,"body":3533,"category":718,"tags":3534},[1113],"2020-05-12","\n\nCollaboration is an important part of [DevSecOps](/solutions/security-compliance/). 
Effective collaboration requires visibility, not only into the work being done by other members of the team, but also into the processes that help the team produce that work in the first place. It can be hard to gauge bottlenecks, solve problems, fix bugs, or work agilely if everyone is juggling their own set of tools or siloed within their own environments.\n\n\n## DevSecOps challenges\n\nOne of the reasons that we frequently discuss toolchain complexity is that it can hinder development speed in significant ways. [In a survey conducted by Forrester](/resources/whitepaper-forrester-manage-your-toolchain/) of over 250 IT professionals, 45% said they were using three or more tools for software delivery. Of those using three tools or more, **two-thirds were using eleven or more tools per toolchain**. While using multiple tools isn’t a bad thing in itself, it adds layers of complexity to processes that are already pretty complicated.\n\nIntegrated toolchains require regular maintenance. If teams rely on a [plugin environment](/blog/plugin-instability/), there are also dependencies that need to be monitored and updated. For teams using microservices, they may also have to contend with 20 different pipelines, each with hundreds of shell script outputs. Dealing with [brittle pipelines](https://harness.io/2018/09/4-reasons-your-jenkins-pipelines-are-brittle/) is a common challenge, and for those using plugins it can be difficult to assess whether the pipeline itself is broken vs. the actual software artifact or build that’s being tested.\n\nFrom an operations perspective, managing multiple toolchains is time-consuming. When problems or errors arise and need to be sent back to the developer, it becomes difficult to troubleshoot because the code isn’t fresh in their mind (also known as context switching). Instead of focusing on building applications, developers worry about environments. 
Instead of focusing on infrastructure optimization, operations teams have to put out fires.\n\nDevSecOps teams need to be able to collaborate, and visibility is a key component in helping teams work better together. By simplifying the toolchain, it reduces barriers to communication and gives [DevOps access](/topics/devops/) to the entire software development lifecycle (SDLC). When teams can build, test, and deploy with single sign-on simplicity, they can solve problems and share knowledge all in one place.\n\nGitLab’s [complete DevOps platform](/solutions/devops-platform/), delivered as a single application, offers built-in CI/CD so that teams can test and deploy all from one interface. Instead of logging into multiple tools, everyone has access to the same information.\n\n## Benefits of GitLab CI/CD\n\n1. **Eliminate siloes:** A complicated toolchain isolates teams and tools, creating bottlenecks in the development lifecycle. GitLab brings dev, sec, and ops together in one interface.\n2. **Greater visibility:** With full visibility across the entire SDLC, teams can solve problems faster with fewer roadblocks.\n3. **Increased efficiency:** Instead of managing a brittle plugin environment or maintaining multiple tools, teams can focus on more productive tasks.\n4. **Industry-leading CI/CD:** Teams don't have to sacrifice functionality for convenience. 
GitLab's CI/CD offers everything teams need for cloud native application development and was [voted a leader in CI by the Forrester Wave](/analysts/forrester-cloudci19/).\n\nTo learn more about single application CI/CD, download our eBook and see how we compare to other CI tools.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nThe benefits of single application CI/CD eBook - [Read here](/why/use-continuous-integration-to-build-and-test-faster/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[108,9],{"slug":3536,"featured":6,"template":699},"solve-devsecops-challenges-with-gitlab-ci-cd","content:en-us:blog:solve-devsecops-challenges-with-gitlab-ci-cd.yml","Solve Devsecops Challenges With Gitlab Ci Cd","en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd.yml","en-us/blog/solve-devsecops-challenges-with-gitlab-ci-cd",{"_path":3542,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3543,"content":3549,"config":3555,"_id":3557,"_type":13,"title":3558,"_source":15,"_file":3559,"_stem":3560,"_extension":18},"/en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"title":3544,"description":3545,"ogTitle":3544,"ogDescription":3545,"noIndex":6,"ogImage":3546,"ogUrl":3547,"ogSiteName":685,"ogType":686,"canonicalUrls":3547,"schema":3548},"How we solved GitLab's CHANGELOG conflict crisis","How we eliminated changelog-related merge conflicts and automated a crucial part of our release process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672139/Blog/Hero%20Images/solving-gitlab-changelog-crisis.jpg","https://about.gitlab.com/blog/solving-gitlabs-changelog-conflict-crisis","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we solved GitLab's 
CHANGELOG conflict crisis\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Speicher\"}],\n        \"datePublished\": \"2018-07-03\",\n      }",{"title":3544,"description":3545,"authors":3550,"heroImage":3546,"date":3552,"body":3553,"category":832,"tags":3554},[3551],"Robert Speicher","2018-07-03","Since its [very first commit] more than six years ago, GitLab has had a\nchangelog\n\ndetailing the noteworthy changes in each release. Shortly after [Enterprise\n\nEdition (EE) was introduced], it [got a changelog of its own]. Whenever\nanyone\n\n– whether it was a community contributor or a GitLab employee – contributed\na\n\nnew feature or fix to the project, a changelog entry would be added to let\nusers\n\nknow what improved.\n\n\nAs GitLab gained in popularity and started receiving more contributions,\nwe'd\n\nconstantly see merge conflicts in the changelog when multiple merge requests\n\nattempted to add an entry to the list. This quickly became a major source of\n\ndelays in development, as contributors would have to rebase their branch in\norder\n\nto resolve the conflicts.\n\n\nThis post outlines how we completely eliminated changelog-related merge\n\nconflicts, removed bottlenecks for contributions, and automated a crucial\npart\n\nof our release process.\n\n\nAt the beginning, GitLab's `CHANGELOG` file would look something like this:\n\n\n```text\n\nv 8.0.0 (unreleased)\n  - Prevent anchors from being hidden by header (Stan Hu)\n  - Remove satellites\n  - Better performance for web editor (switched from satellites to rugged)\n  - Faster merge\n  - ...\n  - Ability to fetch merge requests from refs/merge-requests/:id\n\nv 7.14.1\n  - Improve abuse reports management from admin area\n  - Ability to enable SSL verification for Webhooks\n\nv 7.14.0\n  - Fix bug where non-project members of the target project could set labels on new merge requests.\n  - Upgrade gitlab_git to 7.2.14 to ignore CRLFs in .gitmodules (Stan Hu)\n  - ...\n  - Fix broken 
code import and display error messages if something went wrong with creating project (Stan Hu)\n```\n\n\nWhen a developer made a change in the upcoming release, `8.0.0` in this\nexample,\n\nthey would add a changelog entry at the bottom:\n\n\n```diff\n\ndiff --git a/CHANGELOG b/CHANGELOG\n\nindex de2066f..0fc2c18 100644\n\n--- a/CHANGELOG\n\n+++ b/CHANGELOG\n\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made literally everything better. Evvvvverything!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\n\nAt the same time, another developer might have made a similar change in\n_their_\n\nbranch:\n\n\n```diff\n\ndiff --git a/CHANGELOG b/CHANGELOG\n\nindex de2066f..5f81cfd 100644\n\n--- a/CHANGELOG\n\n+++ b/CHANGELOG\n\n@@ -5,6 +5,7 @@ v 8.0.0 (unreleased)\n   - Faster merge\n   - ...\n   - Ability to fetch merge requests from refs/merge-requests/:id\n+  - Made a few things worse. Woops!\n\n v 7.14.1\n   - Improve abuse reports management from admin area\n```\n\n\nNow when one branch was merged, it'd create a conflict in the other:\n\n\n```diff\n\ndiff --cc CHANGELOG\n\nindex 5f81cfd,0fc2c18..0000000\n\n--- a/CHANGELOG\n\n+++ b/CHANGELOG\n\n@@@ -5,7 -5,7 +5,11 @@@ v 8.0.0 (unreleased\n    - Faster merge\n    - ...\n    - Ability to fetch merge requests from refs/merge-requests/:id\n++\u003C\u003C\u003C\u003C\u003C\u003C\u003C HEAD\n +  - Made a few things worse. Woops!\n++=======\n\n+   - Made literally everything better. 
Evvvvverything!\n\n++>>>>>>> developer-1\n\n  v 7.14.1\n    - Improve abuse reports management from admin area\n```\n\n\nThis resulted in a ton of wasted time as something would get merged, and\nthen\n\nevery other open branch adding a changelog entry would need to be rebased.\nThe\n\nsituation only got worse as the number of contributors to GitLab grew over\ntime.\n\n\nOur initial, [boring solution] to the problem was to begin adding empty\n\nplaceholder entries at the beginning of each monthly release cycle. The\n\nchangelog for the upcoming unreleased version might look like this:\n\n\n```\n\nv8.1.0 (unreleased)\n  -\n  -\n  -\n  -\n  -\n  -\n  -\n  - (and so on)\n```\n\n\nA developer would make their change and then choose a random spot in the\nlist to\n\nadd a changelog entry. This worked for a while, until the placeholders began\nto\n\nbe filled out as we got closer to the release date. Eventually two (or more)\n\nmerge requests would attempt to add different entries at the same\nplaceholder,\n\nand one being merged created a conflict in the others.\n\n\nThe problem was lessened, but not solved.\n\n\nNot only was this a huge waste of time for developers, it created an\nadditional\n\nheadache for [release managers] when they cherry-picked a commit into a\nstable\n\nbranch for a patch release. If the commit included a changelog entry, which\nany\n\nchange intended for a patch release _should_ have, cherry-picking that\ncommit\n\nwould bring in the contents of the changelog at the point of that commit,\noften\n\nincluding dozens of unrelated changes. The release manager would have to\n\nmanually remove the unrelated entries, often doing this multiple times per\n\nrelease. 
This was compounded when we had to release multiple patch versions\nat\n\nonce due to a security issue.\n\n\n[very first commit]:\nhttps://gitlab.com/gitlab-org/gitlab-ce/commit/9ba1224867665844b117fa037e1465bb706b3685\n\n[Enterprise Edition (EE) was introduced]:\n/releases/2013/07/22/announcing-gitlab-enterprise-edition/\n\n[got a changelog of its own]:\nhttps://gitlab.com/gitlab-org/gitlab-ee/commit/e316324be5f71f02a01ae007ab1cf5cbe410c2e1\n\n[boring solution]: https://handbook.gitlab.com/handbook/values/#efficiency\n\n[release managers]:\nhttps://gitlab.com/gitlab-org/release/docs/blob/master/quickstart/release-manager.md\n\n\n## Brainstorming solutions\n\n\nFrustrations with the process finally reached a tipping point, and [an issue\nwas\n\ncreated] to discuss a solution. [Yorick] had the [original idea] that would\n\nultimately form the foundation of our solution. During a [trip around the\n\nworld], myself, [Douwe], and [Marin] were in Brooklyn, NY, and during a walk\n\naround the city one beautiful summer evening we ended up [with a proposal]\nto\n\nfinally solve the problem.\n\n\nEach changelog entry would be its own YAML file in a `CHANGELOG/unreleased`\n\nfolder. When a release manager went to cherry-pick a merge into a stable\nbranch\n\nin preparation for a release, they'd use a custom script that would perform\nthe\n\ncherry-pick and then move any changelog entry added by that action to a\n\nversion-specific subfolder, such as `CHANGELOG/8.9.4`. 
At the time of\nrelease,\n\nany entries in the version's subfolder would be compiled into a single\nMarkdown\n\nchangelog file, and then deleted.\n\n\nWith an idea of where we wanted to end up but no idea how to get there, I\n\nstarted with a [spike].\n\n\n[an issue was created]: https://gitlab.com/gitlab-org/gitlab-ce/issues/17826\n\n[original idea]:\nhttps://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12623521\n\n[Yorick]: /company/team/#yorickpeterse\n\n[Douwe]: /company/team/#DouweM\n\n[Marin]: /company/team/#maxlazio\n\n[trip around the world]: /2016/08/24/gitlab-in-action/\n\n[spike]: https://gitlab.com/snippets/1713271\n\n\n## A turning point\n\n\nAfter a few days of working on the spike, I [had a realization] that we\ndidn't\n\nneed the cherry-picking concept at all:\n\n\n> Cherry picking a merge commit into a stable branch will add that merge's\n\n> `CHANGELOG/unreleased/whatever-its-called.yml` file to the stable branch.\nUpon\n\n> tagging a release with release-tools, we can consider _everything_ in that\n\n> stable branch's \"unreleased\" folder as part of the tagged release. We\ncollect\n\n> those files, compile them to Markdown, remove them from the stable branch\n\n> _and_ `master`, and that's our changelog for the release.\n\n\nThis was a major \"aha\" moment, as it greatly simplified the\n\nworkflow for release managers. They could continue their existing workflow,\nand\n\nthe release flow would transparently handle the rest. It also meant we could\n\nhandle everything in our [release-tools] project, which is responsible\n\nfor tagging a release and kicking off our packaging.\n\n\nEven though we ended up not using a lot of the work that went into it, my\n\noriginal spike was still valuable. It allowed us to see pain points early\non,\n\nrefine the process, and find a better solution. 
It also gave me additional\n\nexperience interacting with Git repositories programmatically via [Rugged],\nand\n\nthat would go on to be especially useful as we implemented the final\ntooling.\n\n\n[with a proposal]:\nhttps://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_12998363\n\n[had a realization]:\nhttps://gitlab.com/gitlab-org/gitlab-ce/issues/17826#note_13527876\n\n[release-tools]: https://gitlab.com/gitlab-org/release-tools/\n\n[Rugged]: https://github.com/libgit2/rugged\n\n\n## Building the building blocks\n\n\nWe knew there were several components that we'd need to build:\n\n\n1. Something to read and represent the individual YAML data files\n\n1. Something to compile individual entries into a Markdown list\n\n1. Something to insert the compiled Markdown into the _correct spot_ in an\n   existing list of releases\n1. Something to remove the files that had been compiled, and then commit the\n   updated `CHANGELOG.md` file to the repository\n\nAll of these components were created in a [single merge request] and refined\n\nthrough several code review cycles. The commits listed there are all fairly\n\natomic and may be interesting to read through on their own. 
The code review\nthat\n\nhappened in the merge request was incredibly valuable, and allowed us to\nreally\n\nsimplify some code that was hard to wrap one's head around, even for me as\nthe\n\noriginal author!\n\n\n## Automated testing\n\n\nOf course, we wouldn't consider this solution complete until we had\nautomated\n\ntests guaranteeing the behavior and consistency of the automated\ncompilation,\n\nincluding reading from and writing to multiple branches across multiple\n\nrepositories.\n\n\nI ended up using Rugged to create [fixture repositories] that would create a\n\nrepeatable testing environment, which we could then verify with [custom\nRSpec\n\nmatchers].\n\n\n[single merge request]:\nhttps://gitlab.com/gitlab-org/release-tools/merge_requests/29\n\n[fixture repositories]:\nhttps://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/changelog_fixture.rb\n\n[custom RSpec matchers]:\nhttps://gitlab.com/gitlab-org/release-tools/blob/6531d8d7b7acbdf6ab577db4381036bbc18e3bbc/spec/support/matchers/rugged_matchers.rb\n\n\n## Hooking into the release process\n\n\nAt this point we were fairly confident the changelog compilation worked, so\nit\n\nwas time to [hook it into our existing release process].\n\n\nWhile testing this integration on a real release, we uncovered a pretty\n\nhilarious (but dangerous) oversight. I'll let the commit that fixed it speak\nfor\n\nitself:\n\n\n> [Protect against deleting everything when there are no changelog\nentries](https://gitlab.com/gitlab-org/release-tools/merge_requests/47/diffs?commit_id=5b3fe48a7697bda856b6bed1fedc4c210439849b)\n\n>\n\n> On a stable branch with no changelog entry files, the resulting empty\n\n> array was passed to `Rugged::Index#remove_all` which, when given an\n\n> empty array, removes **everything**. 
This was not ideal.\n\n\n[hook it into our existing release process]:\nhttps://gitlab.com/gitlab-org/release-tools/merge_requests/47\n\n\n## Developer tooling\n\n\nThe final pieces of the puzzle were creating a tool to help developers\ncreate\n\nvalid changelog entries easily, and adding documentation. Both were handled\nin\n\n[this merge\nrequest](https://gitlab.com/gitlab-org/gitlab-ce/merge_requests/7098).\n\n\nThis tool allows developers to run `bin/changelog`, passing it the title of\n\ntheir change, to generate a valid changelog entry file. Additional options\nare\n\n[in the\ndocumentation](https://docs.gitlab.com/ee/development/changelog.html).\n\n\n## Future plans\n\n\nThis changelog process has worked beautifully for us since it was\nintroduced,\n\nand we know it might be just as useful to other projects. We're\n[investigating a\n\nway to make it more generic] so that it can remove a tedious chore for more\n\ndevelopers.\n\n\nI worked on this project as part of our Edge team, now known as the [Quality\n\nteam]. If you're interested in this kind of internal tooling or other\n\nautomation, we're hiring! 
Check out our [open positions](/jobs/).\n\n\n[investigating a way to make it more generic]:\nhttps://gitlab.com/gitlab-org/release-tools/issues/209\n\n[Quality team]: https://about.gitlab.com/handbook/engineering/quality/\n\n\nPhoto by [Patrick\nTomasso](https://unsplash.com/photos/1S-PanVaJmU?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/search/photos/abstract?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n",[790,9],{"slug":3556,"featured":6,"template":699},"solving-gitlabs-changelog-conflict-crisis","content:en-us:blog:solving-gitlabs-changelog-conflict-crisis.yml","Solving Gitlabs Changelog Conflict Crisis","en-us/blog/solving-gitlabs-changelog-conflict-crisis.yml","en-us/blog/solving-gitlabs-changelog-conflict-crisis",{"_path":3562,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3563,"content":3569,"config":3574,"_id":3576,"_type":13,"title":3577,"_source":15,"_file":3578,"_stem":3579,"_extension":18},"/en-us/blog/speed-security-quality-with-hackerone",{"title":3564,"description":3565,"ogTitle":3564,"ogDescription":3565,"noIndex":6,"ogImage":3566,"ogUrl":3567,"ogSiteName":685,"ogType":686,"canonicalUrls":3567,"schema":3568},"Workflow tips to ship faster without sacrificing security or quality","We partnered up with HackerOne to explain how to ship faster with a security-first development mindset. 
Watch the recording and check out the slides here.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671606/Blog/Hero%20Images/workflow-tips-security-quality-cover.jpg","https://about.gitlab.com/blog/speed-security-quality-with-hackerone","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Workflow tips to ship faster without sacrificing security or quality\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Erica Lindberg\"}],\n        \"datePublished\": \"2017-06-05\",\n      }",{"title":3564,"description":3565,"authors":3570,"heroImage":3566,"date":3571,"body":3572,"category":811,"tags":3573},[715],"2017-06-05","\n\n[Release early and often](/blog/release-early-release-often/),\nrespond quickly to customer feedback, iterate. Rinse, repeat.\nThe value of getting new features and products in front of customers faster has made\nits mark on the business world. As a result, development teams are under pressure\nto shorten release cycles and meet tighter deadlines all while maintaining high\nquality and security standards. How do experienced teams do it?\n\n\u003C!-- more -->\n\nAccelerating the development lifecycle without cutting corners is no easy feat but\nit can be done. 
While there's no \"silver bullet\" solution, adopting a security-first\nmindset and a few workflow best practices can help.\n\nWatch the our webcast with HackerOne below to get all the details on how you can build in quality and\nsecurity checks throughout your development lifecycle from GitLab's Product Manager, [Victor Wu](/company/team/#victorwu416),\nand GitLab Security Lead, [Brian Neel](/company/team/#b0bby_tables).\n\nYou can watch the recording, check out the slides, and read a few of the highlights\nbelow.\n\n## Security as a first-class citizen\n\nEnsuring every line of code is secure is a shared responsibility, meaning security\nshould be top of mind from the very beginning of the development process. Don't wait\nuntil the very end to start the conversation around security and check for vulnerabilities.\n\n> \"We want to take security and make it a first-class citizen. You want security controls\nbaked into each stage of your development process. When we develop software and we\ndevelop in small chunks, we always say we want cross-functional collaboration.\nWe want people at the table earlier on.\" - Victor Wu, Product Manager, GitLab\n\nWhether you have dedicated security experts, or perhaps a lead engineer who's wearing\nmultiple hats, talk about security from the get go so that security issues\ncan be identified earlier, and vulnerabilities can be avoided altogether.\n\n## Workflow best practices\n\nIn the webcast, Victor details how DevOps teams can bake quality and security controls\ninto their workflows so that these checks don't become cumbersome bottlenecks at the\nvery end of the process.\n\nHere are a couple of his highlights:\n\n### Make smaller changes and commit often.\n\nPerhaps the most critical adjustments to make to your workflow is how you actually write\nand collaborate on code. 
When we talk about development speed, a big part of this is transitioning\naway from developing huge portions of code over long periods of time to making smaller changes more often\nand making that work visible sooner.\n\n> \"We want to ship smaller pieces, often. Whether it's in an agile context, scrum,\nor moving away from the more traditional waterfall requirements, we want to ship\nin small pieces so we can react more quickly and minimize risk.\" - Victor\n\nBy adopting this practice, it's quicker to perform code reviews and\nsecurity checks because reviewers are only dealing with a couple of changes. Then,\nif there is an issue, it becomes much easier to identify the cause because there\nare fewer new variables to consider.\n\n### Involve experts and reviewers early in the development process.\n\nInvolving collaborators and reviewers earlier in the development process does two things.\nFirst, it can speed up the development process by giving stakeholders an opportunity\nto anticipate problems *before* developers begin to write code, and nip them in the bud.\nIt's common to involve your UX team, product managers, and software architects during the\nplanning phase and throughout the code review process, but often security is left out.\n\nGet your security experts involved in the earlier phases of your development process\nso it doesn't become a bottleneck right before you're trying to release.\n\n> \"Let's get our UX folks early on, let's get our business managers involved early on.\nLet's not wait until very late in the game before we bring our product managers,\nsenior engineers, our architect, and security experts.\" - Victor\n\nSecondly, by keeping all stakeholders involved in the conversation throughout the\ndevelopment process, you can ensure that by the time the code is ready to move\ninto production, most errors have been spotted and corrected.\n\n### Get code into staging or test environments earlier.\n\nThis goes back to the high-level concept that we 
want to work on small pieces of code and get\nthem integrated into the mainline branch right away to minimize the risk of something not working,\nor not accounting for certain things.\n\n\"The point of pushing code into production-like environments is to get your feature into a place that looks\nand functions more like the real world,\" says Victor. Getting your code into staging or test environments sooner\ncan also help to minimize security risks.\n\n> \"You might have certain tools to scan dynamically and inject attacks into\nyour systems, whether that might be directly into your data or your code base.\nIn the same way that you have human testers doing manual testing, in addition to the automated testing,\nyou might have human users doing the security testing as well.\" - Victor\n\nAgain, if you're developing in small chunks, involving stakeholders earlier on into those environments,\nthat they can jump into those environments and start testing the feature.\n\n### Leverage your community to spot and prioritize security issues and bugs faster.\n\nEven with all the right quality and security checks threaded throughout the development process,\nproblems can slip through. In the webcast, Security Lead, Brian Neel, details the\nevolution of the security development process (starts at 28:20) and why GitLab's security\nteam uses a bug bounty program to round out our security practices.\n\n> \"Right around the time you push a beta out to customers, you can open up a bug bounty program, and it provides\nsort of an endless coverage from prior to version 1 all the way through version 2 and into the future for any new\nvulnerabilities. 
You're constantly going to have professional hackers out there testing this code, testing it against new types of\nvulnerabilities.\"\n\n## Recording\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9_yicOrtbqM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\n## Slides\n\n\u003Ciframe src=\"//www.slideshare.net/slideshow/embed_code/key/fWsLY4ft2VvAMA\" width=\"595\" height=\"485\" frameborder=\"0\" marginwidth=\"0\" marginheight=\"0\" scrolling=\"no\" style=\"border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;\" allowfullscreen> \u003C/iframe>\n\n",[787,9],{"slug":3575,"featured":6,"template":699},"speed-security-quality-with-hackerone","content:en-us:blog:speed-security-quality-with-hackerone.yml","Speed Security Quality With Hackerone","en-us/blog/speed-security-quality-with-hackerone.yml","en-us/blog/speed-security-quality-with-hackerone",{"_path":3581,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3582,"content":3588,"config":3594,"_id":3596,"_type":13,"title":3597,"_source":15,"_file":3598,"_stem":3599,"_extension":18},"/en-us/blog/speed-up-your-monorepo-workflow-in-git",{"title":3583,"description":3584,"ogTitle":3583,"ogDescription":3584,"noIndex":6,"ogImage":3585,"ogUrl":3586,"ogSiteName":685,"ogType":686,"canonicalUrls":3586,"schema":3587},"Speed up your monorepo workflow in Git","Tap into the features that can reap huge savings in the long run for any developer team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665560/Blog/Hero%20Images/speedmonorepo.jpg","https://about.gitlab.com/blog/speed-up-your-monorepo-workflow-in-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up your monorepo workflow in Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2022-09-06\",\n      
}",{"title":3583,"description":3584,"authors":3589,"heroImage":3585,"date":3591,"body":3592,"category":832,"tags":3593},[3590],"John Cai","2022-09-06","Monorepos have grown in popularity in recent years. For many of us, they are\na\n\npart of our daily Git workflows. The trouble is working with them can be\nslow. Speeding up\n\na developer's workflow can reap huge savings in the long run for any team.\n\n\nFirst, a word about monorepos. What does it mean for a repository to be a\n\nmonorepo anyway? Well, it depends who you ask and the definition has become\n\nmore flexible over time, but here are a few.\n\n\n## Characteristics of monorepos\n\n\nMonorepos have the following characteristics.\n\n\n### Multiple sub-projects\n\n\nThe typical definition of \"monorepo\" is a repository that contains multiple\nsub-projects. For instance, let's imagine a repository with a web-facing\nfront end,\n\na backend, an iOS app directory, and an android app directory:\n\n\n```\n\nawesome-app/\n\n|\n\n|--backend/\n\n|\n\n|--web-frontend/\n\n|\n\n|--app-ios/\n\n|\n\n|--app-android/\n\n\n```\n\n\n`awesome-app` is a single repository:\n\n\n```\n\ngit clone https://my-favorite-git-hosting-service.com/awesome-app.git\n\n```\n\n\nThe [Chromium](https://github.com/chromium/chromium) repository is a good\n\nexample of this.\n\n\n### Large files\n\n\nRepositories can also grow to be very large if large files are checked in.\nIn\n\nsome cases, binaries or other large assets such as images are checked into\nthe\n\nrepository to have their history tracked. Other times, large files are\ninadvertently \n\nintroduced into the repository. 
The way Git history works, even if these\nfiles are\n\nimmediately removed, the single version that was checked in remains.\n\n\n### Old projects with deep histories\n\n\nWhile Git is very good at compressing text files, when a Git repository has\na deep history,\n\nthe need to keep all versions of a file around can cause the size of the\nrepository to be huge.\n\n\nThe [Linux](https://github.com/torvalds/linux) repository is a good example\nof this.\n\n\nFor instance, the Linux project's first Git commit is from [April\n2005](https://github.com/torvalds/linux/commit/1da177e4c3f41524e886b7f1b8a0c1fc7321cac2).\n\n\nAnd a `git rev-list --all --count` gives us 1,120,826 commits! That's a lot\nof\n\nhistory! Getting into Git internals a little bit, Git keeps a commit object,\nand a\n\ntree object for each commit, as well as a copy of the files at that snapshot\n\nin history. This means a deep Git history means a lot of Git data.\n\n\n## Speeding up your Git workflow\n\n\nHere are some features to help speed up your Git workflow.\n\n\n### Sparse checkout\n\n\n[git sparse checkout](https://git-scm.com/docs/git-sparse-checkout) reduces\nthe\n\nnumber of files you check out to a subset of the repository. (NOTE: This\nfeature\n\nin Git is still marked experimental.) 
This is especially useful in the case\nof\n\n[many sub-projects in a repository](#multiple-sub-projects).\n\n\nTaking our [example](#multiple-sub-projects) of a monorepo with multiple\n\nsub-projects, let's say that as a front-end web developer I only need to\nmake\n\nchanges to `web-frontend/`.\n\n\n```sh\n\n> git clone --no-checkout\nhttps://my-favorite-git-hosting-service.com/awesome-app.git\n\n> cd awesome-app\n\n> git sparse-checkout set web-frontend\n\n> git checkout\n\nYour branch is up to date with 'origin/master'.\n\n> ls\n\n> web-frontend README.md\n\n```\n\n\nOr, if you've already checked out a worktree, sparse checkout can be used to\nremove\n\nfiles from the worktree.\n\n\n\n```sh\n\n> git clone https://my-favorite-git-hosting-service.com/awesome-app.git\n\n> cd awesome-app\n\n> ls\n\n> backend web-frontend app-ios app-android README.md\n\n> git sparse-checkout set web-frontend\n\nUpdating files: 100% (103452/103452), done.\n\n> ls\n\n> web-frontend README.md\n\n```\n\n\nSparse checkout will only include the directories indicated, plus all files\n\ndirectly under the root repository directory.\n\n\nThis way, we only checkout the directories that we need, saving both space\nlocally\n\nand time since each time `git pull` is done, only files that are checked out\nwill\n\nneed to be updated.\n\n\nMore information can be found in the\n[docs](https://git-scm.com/docs/git-sparse-checkout)\n\nfor sparse checkout.\n\n\n### Partial clone\n\n\n[git partial\nclone](https://docs.gitlab.com/ee/topics/git/partial_clone.html#:~:text=Partial%20clone%20is%20a%20performance,0%20or%20later%20is%20required)\nhas a similar goal to sparse checkout in reducing the number\n\nof files in your local Git repository. 
It provides the option to filter out\n\ncertain types of files when cloning.\n\n\nPartial clone is used by passing the `--filter` option to `git-clone`.\n\n\n```sh\n\ngit clone --filter=blob:limit=10m\n\n```\n\n\nThis will exclude any files over 10 megabytes from being copied to the local\n\nrepository. A full list of supported filters are included in the\n\n[docs for\ngit-rev-list](https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt\n",[1684,9,1035],{"slug":3595,"featured":6,"template":699},"speed-up-your-monorepo-workflow-in-git","content:en-us:blog:speed-up-your-monorepo-workflow-in-git.yml","Speed Up Your Monorepo Workflow In Git","en-us/blog/speed-up-your-monorepo-workflow-in-git.yml","en-us/blog/speed-up-your-monorepo-workflow-in-git",{"_path":3601,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3602,"content":3608,"config":3614,"_id":3616,"_type":13,"title":3617,"_source":15,"_file":3618,"_stem":3619,"_extension":18},"/en-us/blog/start-using-git",{"title":3603,"description":3604,"ogTitle":3603,"ogDescription":3604,"noIndex":6,"ogImage":3605,"ogUrl":3606,"ogSiteName":685,"ogType":686,"canonicalUrls":3606,"schema":3607},"How to tidy up your merge requests with Git","Here's how to use a Git feature that saves a lot of time and cleans up your MRs.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672243/Blog/Hero%20Images/git-tricks-cover-image.png","https://about.gitlab.com/blog/start-using-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to tidy up your merge requests with Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ronald van Zon\"}],\n        \"datePublished\": \"2019-02-07\",\n      }",{"title":3603,"description":3604,"authors":3609,"heroImage":3605,"date":3611,"body":3612,"category":832,"tags":3613},[3610],"Ronald van Zon","2019-02-07","\n\nI've worked on a lot of open source projects and one thing they all 
have in common is\nwhen you create a merge request (or pull request) they will often ask, \"Can you clean up your request?\"\nbecause commits like *fix typo* should not be included in a Git history.\n\nNow there are a few ways of cleaning up commits and I'll show you what I have found to be the easiest way.\n\nBelow is an example scenario where I use a feature of Git that has saved me a lot of time.\nI have a tiny project seen in the image below.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_demo_project.png){: .shadow.medium.center}\n\nNow I like to run my `main.py` in a test environment to see if it works as expected.\nI like to do that by configuring a `.gitlab-ci.yml` to run `main.py`.\nAlthough this is extremely easy, for this example I made sure I increased the number of commits\nto illustrate my example a bit more clearly. So after some time my commit history looks like this:\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_bad.png){: .shadow.medium.center}\n\nHere you can see my first three commits add `README.md`, `main.py` and `.gitlab-ci.yml`.\nA few commits update my `gitlab-ci` file, trying some stuff out, and fixing typos.\nThere's also a commit that cleans up my `gitlab-ci` and two more to fix and clean up `main.py`.\n\nNow some of you might see this and think, \"Looks good,\" while others might want to scream at me\nfor making a mess out of my commits.\n\nHow do we fix it?\n\n## How to consolidate your commits\n\nFirst, let's revert the last two commits using [reset](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html#unstage-all-changes-that-have-been-added-to-the-staging-area).\nWe don't want to lose our changes so we will use `git reset --soft HEAD~2`.\n`--soft` will keep our changes of the files and `HEAD~2` tells Git the two commits from `HEAD` position should be reverted.\n\nWe create a new commit, `git commit --fixup 6c29979`. 
This will create a commit called `fixup! Add main Python file`.\nWhen we run `git rebase -i --autosquash 24d214a` we can see below that our `fixup` commit has been moved below\nthe commit we referenced with the tag *6c29979*. I could save this and the fixup will be merged into the commit above.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_1.png){: .shadow.medium.center}\n\nBut if we look at the commits below the *fixup*, we see that all the commits are related to the *.gitlab-ci.yml*\nand by making a small change here, we can clean up my commits in a single go. We will change the *pick* to *fixup* for all\ncommits but `Add default gitlab-ci` (shown in the image below) and we will save this.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_rebase_2.png){: .shadow.medium.center}\n\nChecking our Git log, we see that our long list of commits has been reduced to just three. There is a big change that\nyou should be aware of: because I have just rewritten my Git history I will have to use `git push --force` to update\nany *remote repository*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_good.png){: .shadow.medium.center}\n\nThis looks a lot better now; only the relevant commits are left. But could we have prevented this while working on this\nfeature? The answer is yes.\n\nWe could have used `git commit --amend` to add almost every commit behind *19d8353 Add default gitlab-ci*.\nThis wouldn't require any new commit for any changes that we were making to our `.gitlab-ci.yml` file. 
We would have ended\nup with the following and we already know how to handle the *fixup*.\n\n![Git Project](https://about.gitlab.com/images/blogimages/start-using-git/git_commits_alternative.png){: .shadow.medium.center}\n\nSomething to keep in mind when using features that rewrite the history of your Git repository: If you already\npushed your previous commits to a *remote repository* you will have to use `git push --force` to overwrite the\nhistory of the *remote repository*. Bad use of this could cause serious problems, so be careful!\nIf you run into trouble, a useful guide that could help you recover from this is [git push --force and how to deal with it](https://evilmartians.com/chronicles/git-push",[1364,9,1684],{"slug":3615,"featured":6,"template":699},"start-using-git","content:en-us:blog:start-using-git.yml","Start Using Git","en-us/blog/start-using-git.yml","en-us/blog/start-using-git",{"_path":3621,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3622,"content":3627,"config":3631,"_id":3633,"_type":13,"title":3634,"_source":15,"_file":3635,"_stem":3636,"_extension":18},"/en-us/blog/strategies-to-reduce-cycle-times",{"title":3623,"description":3624,"ogTitle":3623,"ogDescription":3624,"noIndex":6,"ogImage":2565,"ogUrl":3625,"ogSiteName":685,"ogType":686,"canonicalUrls":3625,"schema":3626},"10 strategies for cycle time reduction","Engineering leads share strategies on how to speed up cycle times.","https://about.gitlab.com/blog/strategies-to-reduce-cycle-times","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 strategies for cycle time reduction\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2018-10-12\",\n      }",{"title":3623,"description":3624,"authors":3628,"heroImage":2565,"date":3128,"body":3629,"category":832,"tags":3630},[852],"\n\nEvery product manager appreciates shorter cycle times. 
One way to reduce cycle\ntimes is to learn from others, so five of our engineering leads share the greatest\nchallenges their teams have experienced and offer the strategies they developed\nto speed up iteration.\n\n>“The impact of shorter cycle times is that users can see the result of their\ninput quickly. Instead of contributing to the planning process and then waiting\nfor weeks to see the feature start to take shape, they can regularly see changes,\nmaking them happy and keeping them engaged with a team. This also helps reduce\nthe scope creep that happens when a project has been in progress for a while.” – Rachel Nienaber\n\n## What's the average cycle time for development teams?\n\nAccording to the [Accelerate State of DevOps Report](https://www.hatica.io/blog/cycle-time/#:~:text=The%20Accelerate%20State%20of%20DevOps,cycle%20time%20of%206.2%20days), the average cycle time for top-performing teams is about 2 days, with the median for most teams being about 3.5 days. However, some development teams [report their average cycle times](https://linearb.io/blog/how-to-reduce-cycle-time-in-software-delivery/) as being as much as 7 days. Teams can calculate this by evaluating how long several types of fixes take from start to finish.\n\n## What are some cycle time challenges?\n\nEvery team has processes and steps that increase cycle delivery time. A shorter and faster time to market empowers teams to fulfill customer demands and exceed their expectations. Here are a few of\nthe ones we’ve dealt with in recent past.\n\n### Getting it right the first time\n\nWhen developing new features, we want to ensure that things don’t break when it\ngets to a user. Because of our monthly release cycle, users could be stuck with\na broken feature until the following month, causing frustration and decreasing\nthe value that GitLab brings to its users. So, it’s important that we test and\nship with certainty. 
[Marin Jankovski](/company/team/#maxlazio), Engineering Manager of\nthe Distribution & Release Management teams, and [Sean McGivern](/company/team/#mcgivernsa),\nEngineering Manager of the Plan team, note the importance of testing and shipping features.\n\n\n>“Finding a way to test changes faster can be challenging. With the Distribution\nteam, we have the responsibility of ensuring that the release we ship still\nfunctions after we make our changes and that users can still install and use GitLab.”\n– Marin Jankovski\n\n>“Our release process is a big challenge, if you consider that the cycle ends\nonce customers have the feature available to use. We don’t have CD for\nGitLab.com, but even if we did, for self-managed customers, we only have one\nfeature release a month. So, that’s a hard limit.” – Sean McGivern\n\n### Differentiating the helpful from the unhelpful\n\nEvery workflow has components that can decrease release cycles, including code\nreviews, manual configuration and testing, and hand-offs. Some of these elements\nare necessary, like product manager meetings, but other aspects can unintentionally\ncause problems. [Tommy Morgan](/company/team/#itstommymorgan), Director of Engineering of\nDev Backend, highlights the essential measures that teams need to take to promote\ncollaboration and alignment but may increase cycle times.\n\n\n>“Teams have all these things that are slowing down cycle times, and there could\nbe extra steps or extra involvement that aren’t necessary or beneficial and that\ncould unintentionally add pressure to the team to slow down. One of the biggest\nchallenges is identifying which ones are legitimate and helpful and which ones\nare us giving into the natural urge to add process. Identifying across that fine\nline is where the real challenge comes into play for most teams.”\n\n### Working across teams\n\nCross-collaboration fosters innovative thinking and allows each team to specialize\nin a specific area to maximize contributions. 
While the benefits of working with\nmultiple teams are abundant, depending on another team’s feedback or assistance\nslows down development, especially when there’s a blocker that can only be resolved\nwith the help of one team. [Rachel Nienaber](/company/team/#rachel-nienaber), Engineering\nManager of the Geo team, and Marin agree that working across teams can have\nsignificant impact on cycle times.\n\n\n>“When other teams implement a new feature that needs some additional work from\nthe Distribution side, getting informed in time is extremely important. We need\nto affect the decision as early as possible, because we have certain limitations\nwhen it comes to distributing GitLab.” – Marin Jankovski\n\n>“One challenge that I see is that there are a lot of dependencies on people\nexternal to the team to ship features. Ordinarily, a quick way to shorten cycle\ntime is to reduce those dependencies, but here at GitLab, that may reduce the\namount of collaboration that happens with each feature. Collaboration is such an\nimportant [value](https://handbook.gitlab.com/handbook/values/#collaboration) that this may have to take\nprecedence in some cases and be more important than the gain in speed.” – Rachel Nienaber\n\n### Asynchronous communication\n\nAt GitLab, we practice [asynchronous communication](https://handbook.gitlab.com/handbook/communication/),\nso we “don’t expect an instantaneous response,” allowing us to focus on our\nindividual workflows. The problem with working asynchronously is that projects\ncan become delayed when working with team members in different time zones and\nresponses don’t trickle in until the following day. Rapid movement might not be\nmade on projects because of time zone differences. 
[Mek Stittri](/company/team/#mekdev),\nEngineering Manager of the Quality team, and Rachel acknowledge the difficulties\nthat can come with asynchronous communication.\n\n>“My team is spread across so many projects and has someone in almost every time\nzone, meaning communication can be challenging.” – Mek Stittri\n\n>“This is my first role with an asynchronous method of working. I am finding that\nmany practices that work in a synchronous team need some adjustment to be useful here.” – Rachel Nienaber\n\n## What are some solutionsb to reducing cycle times?\n\nAt GitLab, we’re fortunate to have the freedom to experiment and\n[iterate](https://handbook.gitlab.com/handbook/values/#iteration), so we’ve been able to develop a few\nstrategies to help us alleviate the challenges we face when meeting customer demands by reducing cycle times.\n\n### How to get it right the first time\n\n\u003Col start=\"1\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Automate work as much as possible.\u003C/b> Using CI to automatically do releases and investing time in automating\n        other manual tasks is crucial for delivery. Manual tasks are both a huge\n        drain on morale and prone to errors. 
It’s much easier to give engineers\n        a bug to fix in an automated tool than to ask them to do the same thing\n        multiple times.\n    \u003C/p>\n    \u003C/li>\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Work with smaller, iterative pieces.\u003C/b> Breaking work into smaller chunks,\n        \u003Ca href=\"/handbook/values/#iteration\">iterating\u003C/a> frequently, and\n        \u003Ca href=\"https://gitlab.com/gl-retrospectives/plan/issues/10\">indicating priority more clearly\u003C/a>\n        within a milestone enables better predictability for what’s going to ship.\n        Planning becomes easier, because individual issues are smaller, so it’s\n        easy to shuffle issues around if something unexpected interrupts other\n        work.\n    \u003C/p>\n    \u003C/li>\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Use feature flags.\u003C/b> Rather than using a giant merge request to make\n        every change for a feature at once, which is harder to review, update,\n        and keep up-to-date with the master branch, consider developing\n        more features behind short-lived \u003Ca href=\"https://docs.gitlab.com/ee/development/feature_flags/index.html\">feature flags\u003C/a>.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n### How to differentiate the helpful from the unhelpful\n\n\u003Col start=\"4\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Measure the impact of components.\u003C/b> Measuring impact can help determine\n        whether a process either doesn’t help out that much in the end or helps\n        out infrequently. In either case, the net benefit can be small, but the\n        pain it adds (in terms of how much extra time you spend trying to ship)\n        makes the overall impact negative. If you can’t measure impact directly,\n        you have to be willing to experiment. Try things, see how they work, and\n        decide if you should keep them or not. 
It’s important to remember that\n        experimentation doesn’t mean process creep - the default end state for\n        an experiment should be “let’s never do that again,” unless there’s a\n        strong sense of value in it.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n\n### How to successfully work across teams\n\n\u003Col start=\"5\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Communicate and automate where possible.\u003C/b> Automating how others get a\n        finished product before releasing it (e.g. create a package on click)\n        and \u003Ca href=\"/handbook/engineering/development/enablement/systems/distribution/#how-to-work-with-distribution\">broadly communicating\u003C/a>\n        how to work with a team can result in better decisions and faster discussions.\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Develop a training program.\u003C/b> Creating a training program to help engineers\n        from other teams perform reviews can reduce cycle time for those teams\n        that regularly depend on the Database team. 
This strategy has the added\n        benefit of giving the Database team more time to focus on their own work.\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Use project management tooling.\u003C/b> Consistent \u003Ca href=\"/handbook/engineering/quality/project-management/\">project management tooling\u003C/a>\n        ensures consistent board configuration that behaves the same at every level,\n        meaning that data rolls up to one top level board which contains a\n        snapshot of an entire team, ensuring that prioritization is clear and\n        workload is transparent.\n\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Spread triaging.\u003C/b> To spread the load of triaging across teams, use \u003Ca href=\"https://gitlab.com/gitlab-org/gitlab-ce/issues?scope=all&utf8=%E2%9C%93&state=closed&label_name[]=triage-package\">triage-package\u003C/a>.\n        Here is a \u003Ca href=\"https://gitlab.com/gitlab-org/gitlab-ce/issues/52024\">recent example\u003C/a>\n        of how we used triage-package to lessen the burden on one team.\n\n    \u003C/p>\n    \u003C/li>\n\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Have more focused milestones.\u003C/b> Creating focused milestones can reduce\n        context switching, since team members can concentrate on specific aspects\n        of a feature.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n### How to make asynchronous communication work\n\n\u003Col start=\"10\">\n    \u003Cli>\n    \u003Cp>\n        \u003Cb>Work on multiple items.\u003C/b> Having a list of multiple items to work on\n        during each release cycle helps team members easily transition to another\n        task rather than remaining blocked when waiting for feedback.\n    \u003C/p>\n    \u003C/li>\n\u003C/ol>\n\n## Advice\n\nReducing cycle times to meet internal cycle time goals can be a difficult undertaking, requiring the input from\nproduct managers, engineering leads, and 
developers. It’s a hard task to\nchallenge long-practiced behaviors, especially when the worst case scenario could\nmean features don’t make a release. Here is some advice to help your team's cycle time reduction effort.\n\n### Be thoughtful and considerate\n\n“At GitLab, we want to iterate quickly, but we also want to keep GitLab.com fast\nand stable. That means that we can’t just decide to ship things faster, we need\nto come up with strategies to mitigate any risks to performance and availability,\nbuild tooling and processes around those strategies. This is often work that can\ngo underappreciated, and it can be hard at times, but it’s vital to ensuring\nthat you can safely shorten cycle times.” – Sean McGivern\n\n### Retrospectives for learning\n\n“A successful team is a happy team. Bringing down production cycle time can help a team be\nmore successful because they are shipping value more often, but your team might\nhave more important things that must be addressed first. Using retrospectives\nwill help you to figure out what success means to your team, and what needs to\nbe done to achieve that success.” – Rachel Nienaber\n\n### Experiment\n\n“Make yourself uncomfortable. It’s unnatural to push for shorter cycle time.\nIt’s natural to add steps - it’s not natural to remove them. Try drastic cuts\nand be willing to learn from an experiment.” – Tommy Morgan\n\n### Spotlight your team\n\n“You can’t make product managers happy, so try to make your team happy instead\nby giving them a chance to shine. 
:P” – Marin Jankovski\n\n",[9,696],{"slug":3632,"featured":6,"template":699},"strategies-to-reduce-cycle-times","content:en-us:blog:strategies-to-reduce-cycle-times.yml","Strategies To Reduce Cycle Times","en-us/blog/strategies-to-reduce-cycle-times.yml","en-us/blog/strategies-to-reduce-cycle-times",{"_path":3638,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3639,"content":3644,"config":3650,"_id":3652,"_type":13,"title":3653,"_source":15,"_file":3654,"_stem":3655,"_extension":18},"/en-us/blog/take-advantage-of-git-rebase",{"title":3640,"description":3641,"ogTitle":3640,"ogDescription":3641,"noIndex":6,"ogImage":3585,"ogUrl":3642,"ogSiteName":685,"ogType":686,"canonicalUrls":3642,"schema":3643},"Take advantage of Git rebase","Tap into the Git rebase features to improve your workflow.","https://about.gitlab.com/blog/take-advantage-of-git-rebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take advantage of Git rebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2022-10-06\",\n      }",{"title":3640,"description":3641,"authors":3645,"heroImage":3585,"date":3647,"body":3648,"category":832,"tags":3649},[3646],"Christian Couder","2022-10-06","These days, developers spend a lot of time reviewing merge requests\n\nand taking these reviews into account to improve the code. We'll discuss how\n\n[Git rebase](https://git-scm.com/docs/git-rebase) can help in\n\nspeeding up these review cycles. But first, let's take a look at some\n\nworkflow considerations.\n\n\n## Different ways to rework a merge request\n\n\nA developer who worked on some code changes and created a merge\n\nrequest with these changes will often have to rework them. Why does\n\nthis happen? 
Tests can fail, bugs are found, or reviewers suggest\n\nimprovements and find shortcomings.\n\n\n### Simple but messy method: add more commits\n\n\nOne way to rework the code changes is to make more changes in some new\n\ncommits on top of the branch that was used to create the merge\n\nrequest, and then push the branch again to update the merge\n\nrequest.\n\n\nWhen a number of commits have been added in this way, the merge\n\nrequest becomes problematic:\n\n\n- It's difficult to review by looking at all the changes together.\n\n- It's difficult to review the commits separately as they may contain\ndifferent unrelated changes, or even multiple reworks of the same code.\n\n\nReviewers find it easier to review changes split into a number of small,\n\nself-contained commits that can be reviewed individually.\n\n\n### Pro method: rebase!\n\n\nA better method to prepare or rework a merge request is to always\n\nensure that each commit contains small, self-contained, easy-to-review\n\nchanges.\n\n\nThis means that all the commits in the branch may need reworking\n\ninstead of stacking on yet more commits. This approach might seem much\n\nmore complex and tedious, but `git rebase` comes to the rescue!\n\n\n## Rework your commits with `git rebase`\n\n\nIf your goal is to build a merge request from a series of small,\n\nself-contained commits, your branch may need significant rework before its\n\ncommits are good enough. When the commits are ready, you can push the branch\n\nand update or create a merge request with this branch.\n\n\n### Start an interactive rebase\n\n\nIf your branch is based on `main`, the command to rework your branch\n\nis:\n\n\n```plaintext\n\ngit rebase -i main\n\n```\n\n\nI encourage you to create [a Git\nalias](https://git-scm.com/book/en/v2/Git-Basics-Git-Aliases),\n\nor a shell alias or function for this command right away, as you will\n\nuse it very often.\n\n\nThe `-i` option passed to `git rebase` is an alias for\n\n`--interactive`. 
It starts\n\n[an 'interactive'\nrebase](https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---interactive)\n\nwhich will open your editor. In it, you will find a list of the\n\ncommits in your branch followed by commented-out lines beginning with\n\n`#`. The list of commits looks like this:\n\n\n```plaintext\n\npick 1aac632db2 first commit subject\n\npick a385014ad4 second commit subject\n\npick 6af12a88cf other commit subject\n\npick 5cd121e2a1 last commit subject\n\n```\n\n\nThese lines are instructions for how `git rebase` should handle these\n\ncommits. The commits are listed in chronological order, with the\n\noldest commit at the top. (This order is the opposite of the default\n\n`git log` order.) What do these lines contain?\n\n\n- An instruction (here, `pick`) that tells Git what action to take\n\n- An abbreviated commit ID\n\n- A commit subject to help you identify the commit contents\n\n\n### Edit the instruction list\n\n\nYou can edit these instructions! When you quit your text editor, `git\nrebase`\n\nreads the instructions you've just edited, and performs them\n\nin sequence to recreate your branch the way you want.\n\n\nAfter the instructions for all commits, a set of commented-out lines\n\nexplain how to edit the instruction lines, and how each instruction\n\nwill change your branch:\n\n\n- If you **delete a commit's entire instruction line** from the list,\n  that commit won't be recreated.\n- If you **reorder the instruction lines**, the commits will be\n  recreated in the order you specify.\n- If you **change the action** from `pick` to something else, such as\n  `squash` or `reword`, Git performs the action you specify on that\n  commit.\n- You can even **add new instruction lines** before, after, or between\n  existing lines.\n\nIf the comment lines aren't enough, more information about what you\n\ncan do and how it works is available in:\n\n\n- The [Git Tools - 
Rewriting\nHistory](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History)\n  section of the \"Pro Git\" book\n- The [Interactive\nmode](https://git-scm.com/docs/git-rebase#_interactive_mode)\n  section of the `git rebase` documentation\n\n### Continue or abort the rebase\n\n\nAn interactive rebase can stop if there is a conflict (as a regular\n\nrebase would) or if you used an instruction like `edit` in the\n\ninstruction line. This allows you to make some changes, like splitting\n\nthe current commit into two commits, or fixing the rebase conflict if\n\nthere is one. You can then either:\n\n\n- Continue the interactive rebase with `git rebase --continue`.\n\n- Abort the rebase altogether with `git rebase --abort`.\n\n\n(These `git rebase` options also work when a regular, non-interactive\n\nrebase stops.)\n\n\n## Further tips and benefits\n\n\n### Try different instructions\n\n\nI recommend you try out the different instructions you can use in\n\neach instruction line, especially `reword`, `edit`, `squash`, and `fixup`.\nYou'll\n\nsoon want to use the abbreviated versions of these instructions: `r`,\n\n`e`, `s`, and `f`.\n\n\n### Run shell commands in your rebase\n\n\nYou might also have noticed an `exec \u003Ccommand>` instruction that\n\nallows you to run any shell command at any point in the interactive rebase.\n\nI've found it more useful for non-interactive rebases, such as:\n\n\n```plaintext\n\ngit rebase --exec 'make test' main\n\n```\n\n\n(It's not an interactive rebase because it doesn't contain the `-i` flag.)\n\n\nThe `--exec \u003Ccommand>` flag allows you to run any shell command after\n\neach rebased commit, stopping if the shell command fails (which is\n\nsignaled by a non zero exit code).\n\n\n### Test all your commits\n\n\nPassing a command to build your software and run its tests, like\n\n`make test`, to `--exec` will check that each commit in your branch\n\nbuilds correctly and passes your tests.\n\n\nIf `make test` fails, the rebase 
stops. You can then fix the current\n\ncommit right away, and continue the rebase to test the next\n\ncommits.\n\n\nChecking each commit builds cleanly and passes all the tests ensures\n\nyour code base is always in a good state. It's especially useful if\n\nyou want to take advantage of\n\n[Git bisect](https://git-scm.com/docs/git-bisect) when you encounter\n\nregressions.\n\n\n## Conclusion\n\n\nIn Git, a rebase is a very versatile and useful tool to rework\n\ncommits. Use it to achieve a workflow with high-quality changes\n\nproposed in high-quality commits and merge requests. It makes your\n\ndevelopers and reviewers more efficient. Code reviews and debugging also\nbecome easier and more effective.\n\n\n**EDIT:** Check out our [follow-up post on how you can apply this is real\nlife](/blog/rebase-in-real-life/).\n",[1684,9,835,1035],{"slug":3651,"featured":6,"template":699},"take-advantage-of-git-rebase","content:en-us:blog:take-advantage-of-git-rebase.yml","Take Advantage Of Git Rebase","en-us/blog/take-advantage-of-git-rebase.yml","en-us/blog/take-advantage-of-git-rebase",{"_path":3657,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3658,"content":3664,"config":3669,"_id":3671,"_type":13,"title":3672,"_source":15,"_file":3673,"_stem":3674,"_extension":18},"/en-us/blog/tasktop-webcast-recap",{"title":3659,"description":3660,"ogTitle":3659,"ogDescription":3660,"noIndex":6,"ogImage":3661,"ogUrl":3662,"ogSiteName":685,"ogType":686,"canonicalUrls":3662,"schema":3663},"Cross-functional ≠ dysfunctional","Don't let process hold you back – here are our best practices for working cross-functionally.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671305/Blog/Hero%20Images/tasktop-integration-cover.png","https://about.gitlab.com/blog/tasktop-webcast-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Cross-functional ≠ dysfunctional\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-11-08\",\n      }",{"title":3659,"description":3660,"authors":3665,"heroImage":3661,"date":3666,"body":3667,"category":811,"tags":3668},[1133],"2017-11-08","\n\nWe recently teamed up with [Tasktop](https://www.tasktop.com/integrations/gitlab-issues) to talk about processes and how to make sure\nthey work for you instead of against you. Check out the highlights below.\n\n\u003C!-- more -->\n\nCreating great software involves a number of different disciplines, each of\nwhich may use their own tool for managing work. Chaos might seem inevitable, but\nwe've learned that a few guiding principles can help to connect people and keep\nchannels of communication open.\n\n## 1. Goals first, process second\n\nProcess exists to serve goals. Before you put processes in place or continue with existing ones, take a step back to establish what you're trying to achieve. Getting input from all stakeholders to determine goals will help to set clear expectations up front and allow everyone to voice their concerns about the scope of the project. Armed with this information, you can then decide on the best process (including timelines, review cycles and communication vehicles) to achieve the desired outcome.\n\n## 2. Establish a single source of truth\n\nWith so many stages and so much activity involved in creating a product or feature, it can be hard to keep track of what's going on. This potential for chaos is quelled by establishing a single source of truth. So when you've outlined your goals and settled on a process for achieving them, write it all down so that everyone has something to refer to and there's no confusion about what was decided or what stage something is at. This is especially helpful for distributed teams, as it means people in other locations and time zones can get up to speed quickly and collaborators can work asynchronously.  \n\n## 3. 
Clear, visible outcomes\n\nWhat exactly does success mean for your project? What metrics will you use? You want clear, measurable outcomes for what you're working on, so that everyone can see what's expected of them and others. At GitLab, we use [issue trackers](https://docs.gitlab.com/ee/user/project/issues/) to follow the progress of a new feature or project. Individual issues can be customized to reflect the problem you're trying to solve, how you're going to go about it, and what the outcome should be. Issues can be connected to related [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/) so that all involved stakeholders can view new developments or changes right away, in a production-like environment. This way concerns or problems can be flagged at any stage along the way.\n\n## 4. Work cross-functionally from start to finish\n\nThe above guidelines only work if all your different functions are in communication. Instead of locking communication per-stage, per team, or per-specialty principle, leave the doors as open as possible. This minimizes risk, as GitLab Product Manager [Victor Wu](/company/team/#victorwu416) explains:\n\n> When you're creating software, and you're creating a feature, you probably want a security stakeholder involved. Security is often something that's tacked on at the end, but if it's baked directly into the design of the software it will be accounted for, and you can estimate the cost or effort required to design and implement something that accounts for security instead of backtracking later.\n\nCross-functional working also encourages a diversity of ideas from different teams contributing to a feature, which can result in a better outcome. You can foster open communication by working more transparently: make your goals, processes and metrics for success visible to your whole organization, if possible, and invite feedback. 
Use real-time editing tools (such as Google docs) for meetings and allow everyone to add to the agenda, take notes or suggest follow-up items.\n\n## 5. Improve the process in iterations\n\nFeeling inspired? Before you throw out all your existing processes, think about whether you can [iterate](https://handbook.gitlab.com/handbook/values/#iteration) on them instead. Radical change can be difficult for people to embrace, so you may have more success with gradual adjustments. Identify something that's not working well, and a small change you can make to improve on it.\n\n> Try to win those small battles, solve those small problems, week by week and month to month, and over time your process will improve.\n\n## Recording\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/8X6x54gaYRo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Slides\n\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vRcQw1XTuEk12ALqnrjMSTPLQ9OAm6Mmzn-eIoUOCJgUdX8dVDejdmN_HaK2AW1lVq1iDG7VxmzaXcD/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\nYou can read more about [Tasktop's GitLab integration here](/blog/tasktop-gitlab-integration/).\n",[1137,232,696,9],{"slug":3670,"featured":6,"template":699},"tasktop-webcast-recap","content:en-us:blog:tasktop-webcast-recap.yml","Tasktop Webcast 
Recap","en-us/blog/tasktop-webcast-recap.yml","en-us/blog/tasktop-webcast-recap",{"_path":3676,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3677,"content":3683,"config":3689,"_id":3691,"_type":13,"title":3692,"_source":15,"_file":3693,"_stem":3694,"_extension":18},"/en-us/blog/teams-gitpod-integration-gitlab-speed-up-development",{"title":3678,"description":3679,"ogTitle":3678,"ogDescription":3679,"noIndex":6,"ogImage":3680,"ogUrl":3681,"ogSiteName":685,"ogType":686,"canonicalUrls":3681,"schema":3682},"Teams speed up development with GitLab's Gitpod integration","Learn about Gitpod as cloud development environment, and how its integration into Gitpod helps teams to get more efficient in their DevOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667482/Blog/Hero%20Images/cover-image-unsplash.jpg","https://about.gitlab.com/blog/teams-gitpod-integration-gitlab-speed-up-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How teams can use the Gitpod integration in GitLab to speed up their development process\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-07-19\",\n      }",{"title":3684,"description":3679,"authors":3685,"heroImage":3680,"date":3686,"body":3687,"category":832,"tags":3688},"How teams can use the Gitpod integration in GitLab to speed up their development process",[2491],"2021-07-19","\n\nTurn back time a bit and try to remember the first project you started or joined, and the onboarding experience. 
How long did it take to install the development environment on your local machine?\n\nWe talked about our own onboarding experiences into software development, and thought about sharing our favorite tips with GitLab users.\n\n## A developer's tale\n\nEveryone starts fresh, and often best practices are just \"learning by doing,\" requiring documentation in the same moment. Programming languages and application architectures are also different - a C++ backend environment has different requirements than a Ruby on Rails web application.\n\nStart with defining the requirements and stages. Oftentimes they are equivalent to CI/CD pipeline stages but executed in your own environment.\n\n* Compile/build the application and verify that the source code is valid (\"build\")\n* Run linting, unit tests, code quality checks (\"test\")\n* Run the application in a dev environment (\"runtime test\")\n* Package the application, run installation tests (\"staging installation\")\n* Run the installed application (\"staging deployment\")\n* Tag, release, and deploy the application (\"release production deployment\")\n\nYou want to run the application in a development environment quickly, everything else with staging and deployments continues to run in your CI/CD pipelines. Their implementation and availability should be on your to-do list.\n\nSoftware applications can depend on existing libraries which are used by many other developers, and help speed up the development process. These dependencies need to be installed into the development environment - if that is your local macOS, Windows or Linux desktop, methods and requirements will differ.\n\n### Provision development environments\n\nCreating a development environment for many different operating systems has its disadvantages: Error messages can differ and implementation specific details do not produce the same results and require back-and-forth communication on the team. 
This often leads to friction and slowed down development processes.\n\nOne key learning over the past decade has been to use CI/CD extensively to test different environments and operating systems, and rely on fast feedback in Merge Requests. Developers should be able to focus on their development environment without having to worry about the many production use cases and support.\n\nVirtual machines in Vagrant, and Docker containers made the generic development environment creation easier and efficient. The documentation instructed everyone to either execute `vagrant up` or `docker-compose up -d` and have the development stack ready. The road to creating Vagrant and Docker base images, including the provisioning scripts with Bash, Ansible, Puppet, etc., was and still is a huge learning process. Opinions on \"good\" best practices differ, and adding your preferred IDE on top of a CLI only VM or container often is an adventure on its own.\n\nBandwidth and traffic can also come into play - each provision and software installation run may consume gigabytes of data. If the workloads and provisioning would run in the cloud, your local connection is not affected.\n\nOne customer mentioned a while ago that their company policy forbids installing a local IDE without a license. The Web IDE in GitLab solves this problem for them throughout the onboarding month.\n\n### Development environment in the browser\n\nThe Web IDE helps with basic programming tasks, editing the documentation or setting up the CI/CD configuration. It does not provide a fully fledged server runtime, as cloud IDE with a programming environment capable of understanding the language you are programming in would. Our vision is to explore ways to [add integrated development environments into the Web IDE](/handbook/engineering/incubation/server-runtime/).\n\nThere are a variety of tools and environments following remote collaboration ideas and the cloud IDE approach. 
You can learn more in [this Twitter thread](https://twitter.com/sytses/status/1400134840754733059) from [GitLab co-founder and CEO, Sid Sijbrandij](/company/team/#sytses). One approach is [Gitpod](https://gitpod.io/), allowing you to spin a fresh environment in the cloud in seconds.\n\nGitpod uses Visual Studio Code (VS Code) as cloud IDE, and integrates with their marketplace to install the same extensions as you would install locally in VS Code. One of the coolest things about Gitpod is that it not only spins up a fresh environment, but also allows you to install additional software or bring your own workspace container image. That way everyone uses the same pre-provisioned environment, and pair programming and debugging becomes a breeze.\n\nNext time, the same state is booted up, secured by single sign-on.\n\n## First steps with Gitpod\n\nNavigate to [gitpod.io](https://gitpod.io) and choose to `continue with GitLab` as login.\n\nIf you are running a self-managed GitLab setup, ask your administrator to [enable the Gitpod integration](https://docs.gitlab.com/ee/integration/gitpod.html).\n\nLet's start with creating a VueJS application. Fork the [learn-vuejs-gitpod](https://gitlab.com/gitlab-de/playground/learn-vuejs-gitpod) project on GitLab.com.\n\n### Alternative: Start on your CLI\n\nAlternatively to forking the project, install NodeJS, npm and the `vue-cli` package, and run `vue create learn-vuejs-gitpod`. The vue command already initializes and commits based on your local Git configuration. 
Add the remote origin and push to a new repository on the remote GitLab server.\n\n```shell\n$ brew install node\n$ yarn add @vue/cli\n$ vue create learn-vuejs-gitpod\n\n$ cd learn-vuejs-gitpod\n$ git remote add origin https://gitlab.com/\u003Cyourusername>/learn-vuejs-gitpod.git\n$ git push -u origin main\n```\n\nGitLab will [create a private project from the git push command](https://docs.gitlab.com/ee/user/project/working_with_projects.html#create-a-new-project-with-git-push).\n\n### Start Gitpod\n\nStart Gitpod from the repository overview by selecting the dropdown switch from the Web IDE.\n\n![Gitpod VueJS Start](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_start_vuejs.png)\n\nSign into your GitLab account with SSO once asked. Accept the required permissions, and wait until the Gitpod environment is booted up.\n\n![Gitpod VueJS Overview](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_overview.png)\n\nChange to the terminal and run yarn to install the dependencies and start the development server. No worries, we'll show you how to automate this in a second!\n\n```shell\nyarn install\nyarn serve\n```\n\nGitpod detects the server listening on port 8080 and offers to make it public. Open the browser instead - it works but says `Invalid host header` because the dev server checks the host name. For running inside Gitpod containers, you need to [disable the host checks](https://github.com/gitpod-io/gitpod/issues/26#issuecomment-554058232).\n\nLet's fix this inside Gitpod in the project. 
Navigate into the left file tree, and add a new file called `vue.config.js` in the top level.\n\n![Gitpod VueJS Overview](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_config_disable_host_checks_devserver.png)\n\nCopy the following code snippet into it\n\n```js\n// vue.config.js\nmodule.exports = {\n    // Rationale: https://github.com/gitpod-io/gitpod/issues/26#issuecomment-554058232\n    devServer: {\n        disableHostCheck: true\n    }\n}\n```\n\nAnd stop the running `yarn serve` command in the terminal by pressing `crtl+c`. Press `cursor up` to select the previous command, or type `!!` to repeat the last command followed by `enter` to start the devserver again. Voilà!\n\n![VueJs running app in Gitpod](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vuejs_web_app.png)\n\nDon't forget to add and commit the new configuration file to persist the changes. Navigate into the `Source Control` section highlighting one pending change. Enter a commit message, click the check mark and approve all pending changes into the commit.\n\n![Gitpod Source Control](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_source_control_add_vuejs_config.png)\n\nSelect the `...` menu to `push` the Git history. Gitpod will ask you for `repository read/write` permissions, walk through the forms and edit them on Gitpod itself. Navigate back to the Gitpod project interface and re-do the push.\n\nFrom the first success, it is not far to your first customized VueJS application. But wait, there is more to learn about Gitpod and efficient workflows!\n\n### VS Code Extensions\n\nNavigate into the `Extensions` menu and search for `gitlab workflow`. Install the extension. 
We recommend installing it globally for your account and all future workspaces.\n\n![Gitpod extension: GitLab workflow for VS Code](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_extension_gitlab_workflow.png)\n\nNext, navigate into the new GitLab menu item on the left, and configure the extension. It needs a personal access token, similar to the process with a local VS Code extension configuration. Follow the steps in the [Gitlab documentation to create a personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token).\n\n![Gitpod: GitLab workflow extension config](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_workflow_extension_config.png)\n\n## Speed up your own projects\n\nUsing Gitpod and GitLab to develop GitLab makes it easy to contribute, but what about your own DevOps lifecycle and projects? Below are a few more examples to speed up your development with Gitpod and GitLab.\n\nRemember: You can start Gitpod without any configuration, directly from a GitLab repository. If there are additional settings needed, you can develop them while learning from the examples and documentation best practices.\n\n### Hugo Pages website live review\n\nYou can use Hugo with GitLab pages to host your own private blog, for example. Hugo is a static site generator written in Go, with public Docker images already available. 
The deployment of [everyonecancontribute.com](https://everyonecancontribute.com/) uses the following configuration in the [.gitlab-ci.yml](https://gitlab.com/everyonecancontribute/web/everyonecancontribute.gitlab.io/-/blob/main/.gitlab-ci.yml) configuration:\n\n```yaml\n.publish: &publish\n  image: registry.gitlab.com/pages/hugo:latest\n  script:\n    - hugo\n  artifacts:\n    paths:\n    - public\n\npages:\n  stage: publish\n  \u003C\u003C: *publish\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      when: always\n  environment:\n    name: $CI_PROJECT_NAME\n    url: https://$CI_PROJECT_NAME/\n```\n\nA local development environment to preview the website needs the Hugo binary installed. Doing the same in the browser, running the Hugo CLI command and previewing the blog post? We've found a way to provision Gitpod in the same way, using [this .gitpod.yml configuration](https://gitlab.com/everyonecancontribute/web/everyonecancontribute.gitlab.io/-/blob/main/.gitpod.yml):\n\n```yaml\nimage: klakegg/hugo:debian\n\nports:\n  - port: 1313\n\ntasks:\n  - command: hugo server -D -b $(gp url 1313) --appendPort=false\n```\n\nThe Hugo container image gets pulled and the Gitpod workspace builder prepares the environment. Note that [Alpine based images do not work](https://github.com/gitpod-io/gitpod/issues/3356#issuecomment-877604994), use Debian variants instead. After starting the workspace, the tasks run the command, and expose a port. The port binding needs to be the external URL of the pod, not localhost. 
`gp url 1313` builds the exact URL, and binds the socket to the Hugo server, making the pod URL publicly accessible for reviews.\n\n![Gitpod: Hugo website](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_hugo_everyonecancontribute_com.png)\n\nFrom there, you can switch branches in Gitpod, and immediately verify the changes.\n\n### VueJS with custom container image\n\nGetting started with VueJS in a new project with the `vue-cli` package is very convenient and the Gitpod docs have a [guide](https://www.gitpod.io/docs/languages/vue/#vue-cli) ready. The default `gitpod/workspace-full` image does not provide the `vue cli` package. You can extend the container image by using your [custom .gitpod.Dockerfile](https://www.gitpod.io/docs/config-docker#configure-a-custom-dockerfile) - Gitpod takes care of building the image first, and later starts the workspace based on it.\n\n```yaml\nFROM gitpod/workspace-full\n\nRUN yarn add @vue/cli\n```\n\nThe `.gitpod.yml` configuration file needs to be instructed to build and use a custom image. On startup, the `tasks` section runs the initial dependency installation, and starts the development environment with `yarn serve`. The server listens on port 5000 by default, this is what gets [exposed](https://www.gitpod.io/docs/config-ports), and instructed to open as call-to-action in the browser.\n\n\n```yaml\nimage:\n  file: .gitpod.Dockerfile\n\ntasks:\n  - init: yarn install\n    command: yarn serve\n\nports:\n  - port: 5000\n    onOpen: open-browser\n```\n\nYou can combine Gitpod for previewing the website with the production deployment using the [five minute production app deployment template](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template) shown in [this project](https://gitlab.com/gitlab-de/playground/5-min-prod-app-vuejs). 
GitLab takes care of provisioning a free AWS EC2 instance, TLS certificates and domain handling.\n\n### More Gitpod workspace images\n\nGitpod provides many [ready-to-use workspace images](https://github.com/gitpod-io/workspace-images). In order to use them, create the `.gitpod.yml` file with this content:\n\n```yaml\nimage:\n  file: .gitpod.Dockerfile\n```\n\nCreate a new `.gitpod.Dockerfile` file and add the import from the desired workspace image.\n\n```yaml\nFROM gitpod/workspace-mysql\n```\n\nIf you need to install additional software, note that the full workspace image is based on Debian and therefore you'll need to use the `apt` package manager. The following command updates the package index, and clears the cache after installation to keep the image clean.\n\n```\nRUN sudo apt update && sudo apt install -y PACKAGENAME && sudo rm -rf /var/lib/apt/lists/*\n```\n\nIf you are not sure about the package name, run Docker locally and search for the package name. Fair warning: The `gitpod/workspace-full` image is huge, use the base image `debian:latest` instead.\n\n```shell\n$ docker run -ti debian:latest bash\n$ apt search POSSIBLENAME\n```\n\nYou can learn more  the [workspace image repository](https://github.com/gitpod-io/workspace-images) to learn more about the Dockerfile configuration used by the builder.\n\n## Do more with Gitpod\n\n### Merge request code reviews\n\nThe GitLab workflow extension comes with more super powers:\n\n* Access the project and Merge Requests\n* Check the CI/CD pipeline status directly in Gitpod\n* Perform MR code reviews in Gitpod and take advantage of [VS Code workflows](/blog/mr-reviews-with-vs-code/)\n\n![Gitpod: MR Code Reviews with the GitLab Workflow extension website](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_vs_code_gitlab_workflow_extension_mr_code_reviews.png)\n\n### Pre-install VS Code Extensions\n\nIn order to ensure specific [VS Code 
extensions](https://www.gitpod.io/docs/vscode-extensions/) are installed, you can define them in the `.gitpod.yml` configuration file in the repository. Example from the [GitLab project](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitpod.yml#L79):\n\n```yaml\nvscode:\n  extensions:\n    - rebornix.ruby@0.28.0\n    - wingrunr21.vscode-ruby@0.27.0\n    - karunamurti.haml@1.3.1\n    - octref.vetur@0.34.1\n    - dbaeumer.vscode-eslint@2.1.8\n    - gitlab.gitlab-workflow@3.24.0\n```\n\n### Learn new programming languages: Rust\n\nGitpod allows you to start a fresh pod environment, pause on idle, and continue at a later point. The default workspace environment image already includes the [Rust compiler](https://www.gitpod.io/docs/languages/rust), which means that you can immediately [start learning Rust](https://doc.rust-lang.org/rust-by-example/).\n\nCreate a new project called `learn-rust` and open Gitpod from the repository view. Add a new file on the left tree view called `hello.rs` and add the following content:\n\n```rust\nfn main() {\n\tprintln!(\"Hello from GitLab! 🦊\");\n}\n```\n\nChange into the terminal and run the following command:\n\n```shell\n$ rustc hello.rs\n```\n\nWe started learning Rust together in an [#EveryoneCanContribute cafe](https://everyonecancontribute.com/post/2020-10-07-cafe-3-gitpod-gitlab-rust/) in October 2020 including [workshop slides with exercises](https://docs.google.com/presentation/d/1t1FdHh04TAOg9WITqRFJHz1YFxMbsQeekN8th1UfFcI/edit). We continued with [Rocket.rs](https://everyonecancontribute.com/post/2021-06-30-cafe-36-rust-rocket-prometheus/) as web app and additional Prometheus monitoring metrics in June 2021. You can watch the recordings to follow the learning process, the mistakes we made on the way, and the first success.\n\n### How to contribute to GitLab with Gitpod\n\nA more complex development environment is GitLab itself. 
The [architecture](https://docs.gitlab.com/ee/development/architecture.html) involves many different components, and the development environment requires you to install several dependencies in Ruby, NodeJS, Go, and backend applications. The GitLab Development Kit (GDK) describes the steps in detail - in order to get everything up and running, you need to plan for a 30 minutes to three hour process, depending on the compute power and bandwidth.\n\nEarly in the process of adopting Gitpod for GitLab team members, the groundwork with the base image and bootstrap script took the majority of the preparation time. You can learn more about the integration process in [this issue request](https://gitlab.com/gitlab-org/gitlab-development-kit/-/issues/1076).\n\n> It's already possible to try out how the setup works by opening Gitpod, which after waiting for the setup to finish (six to eight minutes) will bring you the Gitpod UI with the GDK fully running and ready for you to make changes and commit. As soon as that setup is finished, you can switch to whatever branch you want, either from the Gitpod UI or via the terminal.\n\nThe [GDK documentation for Gitpod](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/gitpod.md) guides you through the required steps. **Important**: You need to start Gitpod from the [gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab/) project (as team member, as contributor, please fork the repository). Additional features, such as a local GitLab runner, feature flags, Advanced search, etc., must be [enabled manually](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/gitpod.md#configure-additional-features).\n\n![GitLab Development Kit running in Gitpod](https://about.gitlab.com/images/blogimages/gitlab-gitpod-teams-development/gitpod_gitlab_gdk_running.png)\n\n### Everyone can contribute\n\nReady? 
Start contributing to your favorite OSS project, and connect with your teams for an all-remote pair programming session using Gitpod! :-)\n\nCover image by [Thomas Lipke](https://unsplash.com/photos/oIuDXlOJSiE) on [Unsplash](https://unsplash.com)\n{: .note}\n",[232,696,9],{"slug":3690,"featured":6,"template":699},"teams-gitpod-integration-gitlab-speed-up-development","content:en-us:blog:teams-gitpod-integration-gitlab-speed-up-development.yml","Teams Gitpod Integration Gitlab Speed Up Development","en-us/blog/teams-gitpod-integration-gitlab-speed-up-development.yml","en-us/blog/teams-gitpod-integration-gitlab-speed-up-development",{"_path":3696,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3697,"content":3703,"config":3708,"_id":3710,"_type":13,"title":3711,"_source":15,"_file":3712,"_stem":3713,"_extension":18},"/en-us/blog/ten-devops-terms",{"title":3698,"description":3699,"ogTitle":3698,"ogDescription":3699,"noIndex":6,"ogImage":3700,"ogUrl":3701,"ogSiteName":685,"ogType":686,"canonicalUrls":3701,"schema":3702},"DevOps terminology: 10 terms that might surprise you","From Yoda to yaks and even baklava, here are 10 DevOps terms we’re betting you’ve never heard of.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681526/Blog/Hero%20Images/devopsterms.jpg","https://about.gitlab.com/blog/ten-devops-terms","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps terminology: 10 terms that might surprise you\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-08-25\",\n      }",{"title":3698,"description":3699,"authors":3704,"heroImage":3700,"date":3705,"body":3706,"category":718,"tags":3707},[2836],"2020-08-25","\n\nYou call yourself a [DevOps professional](/topics/devops/build-a-devops-team/) but do you know the definitions of yak shaving, Yoda conditions or baklava code?\n\nWe didn’t think 
so.\n\n## Benefits of DevOps\n\nDevOps outpaces the old software development methodologies like waterfall simply because it’s more efficient. Here are eight obvious DevOps wins:\n\n* Deployment is faster\n\n* Product quality is better\n\n* Automation simplifies the whole process\n\n* There’s flexible, continuous delivery\n\n* Scalability is even easier to achieve\n\n* Teams are transparent and communicative\n\n* There are faster fixes for bugs and other problems\n\n* It gives space to constantly iterate\n\nRegardless of your role on a business or a technical side, there are DevOps benefits for everyone.\n\n## DevOps terms and team communication\n\nA basic understanding of DevOps terms is important when it comes to optimal team communication. Otherwise, there are a lot of blank, blinking faces in the crowd. But even more important than simply understanding the terminology is consciously practicing good communication about DevOps and iterating on your team’s communication style.\n\nNew ideas, tools, and processes are constantly cropping up in the DevOps space, which means there is new terminology to learn. Great team communication involves continuously helping each other keep up with new knowledge and ensuring an environment of continuous learning.\n\n## DevOps terms glossary\n\nHere’s a look at our [DevOps](/topics/devops/) glossary with a focus on 10 DevOps terms even seasoned pros might not have encountered. And if you think there are some obscure ones we missed, please tell us about it [here](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/8878). We are working on a comprehensive GitLab guide to DevOps terms.\n\n### Devops term 1: Baklava code\n\n[Baklava](https://en.wikipedia.org/wiki/Baklava) is a dessert made up of many layers of thin phyllo dough – which is notoriously difficult to work with. 
Baklava code is the same: Lots of thin layers of code which makes it too fragile to stand up to real world use.\n\n### DevOps term 2: Dark launch\n\nA dark launch usually refers to a partial or incomplete release of a feature or features without any announcement. This under-the-radar release is a way to gather performance and testing data without the pressure of public input, because the features haven’t actually been talked about.\n\n### DevOps term 3: Dead code\n\nCode is considered \"dead\" if it lives in a program but actually doesn’t do anything and/or contribute to results or performance. Generally [dead code should be removed](https://refactoring.guru/smells/dead-code) as it’s a potential waste of space and computational power.\n\n### DevOps term 4: Everything-as-code\n\nEverything-as-code takes [infrastructure-as-code](https://searchitoperations.techtarget.com/definition/Infrastructure-as-Code-IAC) and goes one step further: Literally everything is treated as code including the infrastructure, virtual machines, and deployment configuration, to name a few. Everything-as-code is made possible by cloud native, proponents of it say it boosts traceability, repeatability, and testing. \n\n### DevOps term 5: Fear-driven development\n\nForget [FOMO](https://www.urbandictionary.com/define.php?term=Fomo), fear-driven development is what happens when project managers raise the stakes by moving up deadlines or laying people off. \n\n### DevOps term 6: NoOps\n\nIt’s DevOps without the \"Ops\" or what could happen if automation eliminates traditional ops tasks. Some see NoOps as the highest evolution of a successful DevOps practice while others don’t see it that way at all. 
NoOps joins a slew of other Ops-related terms including [GitOps](https://thenewstack.io/what-is-gitops-and-why-it-might-be-the-next-big-thing-for-devops/), [CIOps](https://dzone.com/articles/kubernetes-anti-patterns-lets-do-gitops-not-ciops), and more.\n\n### DevOps term 7: Rubberducking\n\nThis novel way of debugging code was made famous in the book [The Pragmatic Programmer](https://www.amazon.com/Pragmatic-Programmer-journey-mastery-Anniversary/dp/0135957052/ref=sr_1_1?dchild=1&keywords=the+pragmatic+programmer&qid=1598365813&sr=8-1). A programmer carries around a rubber duck and discovers that by explaining the code to the duck, line by line, the errors made themselves obvious. Translated for the real world, and practiced at GitLab, it means talking through your code with another developer which helps make flaws or logical errors more obvious.\n\n### DevOps term 8: Spaghetti code\n\nIf someone tells you your code is like spaghetti don’t take it as a compliment. Spaghetti code is all over the map, often with too many [GOTO statements](https://www.geeksforgeeks.org/goto-statement-in-c-cpp/). It’s poorly organized and often lacks any kind of traditional structure. \n\n### DevOps term 9: Yak shaving\n\nDuring a global pandemic when many are working from home, it’s safe to assume yak shaving is happening frequently, and it’s definitely a term that is used [outside of programming](https://americanexpress.io/yak-shaving/). In general, it means doing something that leads to something else but has nothing to do with the original goal. Programmers use it to refer to interminable tasks that must be done before a project can move forward, as in, \"I’ll get to that once I’ve shaved the yak.\"\n\n### DevOps term 10: Yoda conditions\n\n*Code you I will Luke Skywalker.* Yoda conditions refers to non-traditionally written code, i.e., code written as [Yoda](https://starwars.fandom.com/wiki/Yoda) speaks. 
Once you put yourself in the mindset it’s possible to understand what you’re looking at, but, just like Luke Skywalker experienced, it can take a while to get the hang of this.\n\n_Some of these are terms in use at GitLab, but in our research we stumbled across [the Coding Horror blog](https://blog.codinghorror.com/new-programming-jargon/) created by Jeff Atwood and we found a few new-to-us terms including Yoda conditions. Jeff refers to his list as the \"top 30 Stack Overflow new programming jargon entries.\"_\n\n## Growth of a DevOps culture\n\nA DevOps culture doesn’t grow simply because an organization decides to implement it. It takes daily, focused effort and cultivation. Some things organizations can do to foster the growth of a DevOps culture are to keep leadership in the loop, openly communicate across the team, and create a roadmap of shared goals and individual responsibilities to help achieve them. Understanding the lingo helps too!\n\nCover image by [Raphael Schaller](https://unsplash.com/@raphaelphotoch) on [Unsplash](https://unsplash.com)\n{: .note}\n",[722,790,9],{"slug":3709,"featured":6,"template":699},"ten-devops-terms","content:en-us:blog:ten-devops-terms.yml","Ten Devops Terms","en-us/blog/ten-devops-terms.yml","en-us/blog/ten-devops-terms",{"_path":3715,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3716,"content":3721,"config":3726,"_id":3728,"_type":13,"title":3729,"_source":15,"_file":3730,"_stem":3731,"_extension":18},"/en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"title":3717,"description":3718,"ogTitle":3717,"ogDescription":3718,"noIndex":6,"ogImage":2173,"ogUrl":3719,"ogSiteName":685,"ogType":686,"canonicalUrls":3719,"schema":3720},"The road to smarter code reviewer recommendations","Machine learning is coming to GitLab's code review process. 
Here's what you need to know, and how you can help!","https://about.gitlab.com/blog/the-road-to-smarter-code-reviewer-recommendations","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The road to smarter code reviewer recommendations\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Taylor McCaslin\"}],\n        \"datePublished\": \"2022-01-04\",\n      }",{"title":3717,"description":3718,"authors":3722,"heroImage":2173,"date":3723,"body":3724,"category":693,"tags":3725},[2570],"2022-01-04","\nYou may recall back in June 2021, we [announced the acquisition of UnReview](/press/releases/2021-06-02-gitlab-acquires-unreview-machine-learning-capabilities/), a machine learning (ML) based solution for automatically identifying appropriate expert [code reviewers](/stages-devops-lifecycle/create/) and controlling review workloads and distribution of knowledge.\n\nAt the start of the new year we wanted to provide an update on our integration progress and our wider vision of leveraging machine learning to make GitLab's [DevOps Platform](/solutions/devops-platform/) smarter. You see, the acquisition of UnReview also was the initial staffing of [our new ModelOps stage](/direction/modelops/).\n\n### Our Newest DevOps Stage\n\nThis new stage, which we’ve named ModelOps, is focused on enabling and empowering data science workloads on GitLab. GitLab ModelOps aims to bring data science into GitLab both within existing features to make them smarter and more intelligent, but also empowering GitLab customers to build and integrate data science workloads within GitLab.\n\nSo what is ModelOps you may wonder? We view ModelOps as an all encompassing term to cover the entire end to end lifecycle of artificial intelligence models. We wanted to set our vision wide to fully cover everything needed to power data science workloads. 
DataOps is the processing of data workloads (think traditional ELT: extract, load, transform) and MLOps is the building, training, and deployment of machine learning models. If you’re confused don’t worry, it’s a lot to wrap your head around.\n\n![a look at the stages of MLOps](https://about.gitlab.com/images/blogimages/MLops.png){: .shadow.small.center}\n\nToday our DevOps Platform helps plan, build, test, secure, deploy, and monitor traditional software. Now we want to extend our DevOps Platform to include AI and ML workloads. If this is interesting to you, be sure to check out our recent Contribute talk where we dive deeper into plans for our ModelOps stage.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C08QVI99JLo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### UnReview as our first feature\n\nSo what does this have to do with UnReview? Our acquisition of UnReview is going to be our first [AI Assisted](/direction/ai-powered/) group’s feature: suggested reviewers within [GitLab’s existing reviewers experience](/blog/merge-request-reviewers/). Today, a developer in a merge request has to manually choose a reviewer to look at their code. With UnReview we can leverage the contribution history for a project and recommend someone well-suited for code review of your specific changes.\n\nHere’s an early mockup (and it may differ from our final UI) of how we’re thinking about this integration:\n\n![an early mockup of our UI](https://about.gitlab.com/images/blogimages/codereviewmockup.png){: .shadow.small.left}\n\nThe UnReview algorithm looks at a variety of data points from your project’s contribution history to suggest an appropriate reviewer. 
We’re still in the early days of this integration but our initial internal testing shows great suggestions.\n\n### Customer beta coming soon!\n\nThis leads me to a final question, might you want to be one of our first customers to try this new code review experience? In early 2022, we’ll begin a private customer beta of this new functionality. If interested, [fill out this form to express interest](https://docs.google.com/forms/d/e/1FAIpQLScpmCwpwyBr0GrXxBQ6vE02eokclFAs9lFk_g5dcyuGaHqFuQ/viewform). Do note that we can’t accept everyone and we’ll focus initially on customer profiles that are well suited for the initial version of the suggestion algorithm. Our only ask is we’d like to find customers with active projects that have a healthy number of contributors. The model currently works best on larger repositories with lots of contributors where it may not immediately be clear who is an ideal code reviewer.\n\nWe can’t wait for customers to begin using this new reviewer suggestion experience and will be providing more updates in early 2022.\n",[722,232,9,766],{"slug":3727,"featured":6,"template":699},"the-road-to-smarter-code-reviewer-recommendations","content:en-us:blog:the-road-to-smarter-code-reviewer-recommendations.yml","The Road To Smarter Code Reviewer Recommendations","en-us/blog/the-road-to-smarter-code-reviewer-recommendations.yml","en-us/blog/the-road-to-smarter-code-reviewer-recommendations",{"_path":3733,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3734,"content":3740,"config":3745,"_id":3747,"_type":13,"title":3748,"_source":15,"_file":3749,"_stem":3750,"_extension":18},"/en-us/blog/three-steps-to-optimize-software-value-streams",{"title":3735,"description":3736,"ogTitle":3735,"ogDescription":3736,"noIndex":6,"ogImage":3737,"ogUrl":3738,"ogSiteName":685,"ogType":686,"canonicalUrls":3738,"schema":3739},"GitLab's 3 steps to optimizing software value streams","Discover the power of GitLab Value Streams Dashboard (VSD) for optimizing software 
delivery workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667893/Blog/Hero%20Images/workflow.jpg","https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab's 3 steps to optimizing software value streams\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-26\",\n      }",{"title":3735,"description":3736,"authors":3741,"heroImage":3737,"date":3742,"body":3743,"category":693,"tags":3744},[1660],"2023-06-26","\n\n\u003Ci>This is part three of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management. In part two, learn how to [get started with the Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/). \u003C/i>\n\nIt’s no news that software development is a complex process that involves many different stages, teams, and tools. With significant investments made in digital transformation and adopting new tools following the shift to remote work, measuring and managing the business value of the software development lifecycle (SDLC) have become more complex.\n\nThis is where Value Stream Management (VSM) comes in. VSM is a methodology that helps organizations optimize their software delivery process by visualizing, measuring, and improving the flow of value (a.k.a. the “value stream”) from ideation to production. Some examples are: the amount of time it takes to go from an idea to production, the velocity of the project, bottlenecks in the development process, and long-running issues or merge requests. 
As you’ve probably guessed from its title, this blog will cover how the [new capabilities of GitLab Value Streams Dashboard](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#value-streams-dashboard-is-now-generally-available) can help you do all that, and optimize your software delivery.\n\n## Value Stream Management in a nutshell \nGitLab [VSM](https://about.gitlab.com/solutions/value-stream-management/) provides end-to-end visibility into your software delivery process. It enables you to [map out your value stream](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#create-a-value-stream-with-custom-stages), identify bottlenecks, measure key metrics, and identify the places where you are either lagging or doing exceptionally well. It then also allows you to take action on these insights. In essence, GitLab VSM helps you to understand and optimize your development processes to deliver software faster and better.\n\n![GitLab Value Stream Analytics](https://about.gitlab.com/images/blogimages/2023-05-24-vsm-overview.png){: .shadow}\nWith Value Stream Analytics, you can establish a baseline for measuring software delivery performance progress and identifying the touchpoints in the process that do not add value to the customer or your business.\n{: .note.text-center}\n\nAnd if you’re wondering how GitLab VSM is able to do that, it’s because GitLab provides an entire DevSecOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire SDLC. So now, your decisions rely on actual data rather than blind estimation or gut feelings. 
Additionally, since GitLab is the place where work happens, these insights are also actionable, allowing your users to move from “understanding” to “fixing” at any time, from within their workflow and without losing context.\n\n## How VSM works: The three-step analysis\nLet’s take a look at how GitLab VSM helps you optimize your SDLC in three easy steps:\n\n**Step 1:** Get an end-to-end view across your entire organization and pinpoint the value streams you need to focus on.\n\nThe [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html) is a centralized view where you can see and compare all of the SDLC metrics of all your organization's projects. This dashboard enables you to identify hotspots in your SDLC streams — projects or teams that are underperforming, with longer stages and cycle times. It also shows you where you have the largest value contributors, so you can identify and learn what is working well and what's not. With this information at hand, you can now prioritize your efforts and understand where to spend your time.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm1.gif){: .shadow}\n\n\nThis centralized UI acts as a single source of truth for your organization, where all the relevant stakeholders can access, view, and analyze the same set of metrics. This ensures everyone is on the same page, promoting consistency in analysis and decision-making.\n\nRead more: [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n\n**Step 2:** Drill down into a specific project.\n\nWhen you select a project from the main dashboard, you are directed to that project's Value Stream Analytics (VSA), where you see its value stream. The project's metrics are presented for each stage of the project, helping you understand where the main work lies and which stages need improvement. 
The VSA overview provides valuable insights into lead times, cycle times, and other critical metrics that help you identify areas for optimization.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm2.gif){: .shadow}\n\n\nRead more: [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n\n**Step 3:** Dive deep into the Value Stream Analytics dashboard to analyze and fix issues.\n\nOnce the main areas of interest are identified, GitLab Value Stream Analytics (VSA) enables you to drill down further into a specific stage of the project. In the stage table, you can sort the **Last event** column to view the most recent workflow event, and sort the items by **duration** so you can rearrange the events and gain insights faster. This way, you can easily detect work items that are slowing down the project in that stage. Here's an example how we dogfood [VSA on gitlab-org](https://gitlab.com/gitlab-org/gitlab/-/value_stream_analytics). \n\nYou can identify the owner of the work items responsible for the delays, examine code changes, and perform a comprehensive analysis of the issue. This level of visibility and traceability empowers you to take targeted actions and make the necessary improvements to optimize the value stream, all within the context of your current workflow.\n\n![VSM illustration](https://about.gitlab.com/images/blogimages/2023-05-24_vsm3.gif){: .shadow}\nUse GitLab Value Stream Management to visualize the progress of work from planning to value delivery, and gain actionable context.\n{: .note.text-center}\n\n## The value of Value Stream Management\nGitLab VSM is a powerful solution that fits seamlessly into your SDLC. By providing end-to-end visibility and granular, actionable insights into the value stream, VSM enables you to optimize your software delivery and provide value to your customers faster. 
Access the information you need, when you need it — and easily act on it from within your workplace. VSM offers you the best of both worlds: out-of-the-box functionality and the ability to customize features.\n\nSay goodbye to time-consuming searches and hello to instant access to the information you need most. To learn more, check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\nTo help us improve the Value Stream Management, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n",[744,722,834,1074,9],{"slug":3746,"featured":6,"template":699},"three-steps-to-optimize-software-value-streams","content:en-us:blog:three-steps-to-optimize-software-value-streams.yml","Three Steps To Optimize Software Value Streams","en-us/blog/three-steps-to-optimize-software-value-streams.yml","en-us/blog/three-steps-to-optimize-software-value-streams",{"_path":3752,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3753,"content":3759,"config":3765,"_id":3767,"_type":13,"title":3768,"_source":15,"_file":3769,"_stem":3770,"_extension":18},"/en-us/blog/tips-for-managing-monorepos-in-gitlab",{"title":3754,"description":3755,"ogTitle":3754,"ogDescription":3755,"noIndex":6,"ogImage":3756,"ogUrl":3757,"ogSiteName":685,"ogType":686,"canonicalUrls":3757,"schema":3758},"5 Tips for managing monorepos in GitLab","Learn the benefits of operating a monolothic repository and how to get the most out of this structure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667591/Blog/Hero%20Images/code-review-blog.jpg","https://about.gitlab.com/blog/tips-for-managing-monorepos-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 Tips for managing monorepos in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Waldner\"}],\n        
\"datePublished\": \"2022-07-12\",\n      }",{"title":3754,"description":3755,"authors":3760,"heroImage":3756,"date":3762,"body":3763,"category":832,"tags":3764},[3761],"Sarah Waldner","2022-07-12","\nGitLab was founded 10 years ago on Git because it is the market leading version control system. As [Marc Andressen pointed out in 2011](https://www.wsj.com/articles/SB10001424053111903480904576512250915629460), we see teams and code bases expanding at incredible rates, testing the limits of Git. Organizations are experiencing significant slowdowns in performance and added administration complexity working on enormous repositories or monolithic repositories. \n\n## Why do organizations develop on monorepos? \n\nGreat question. While [some](https://www.infoworld.com/article/3638860/the-case-against-monorepos.html) might believe that monorepos are a no-no, there are valid reasons why companies, including  Google or GitLab (that’s right! We operate a monolithic repository), choose to do so. The main benefits are: \n\n- Monorepos can reduce silos between teams, streamlining collaboration on design, development, and operation of different services because everything is within the same repository.\n- Monorepos help organizations standardize on tooling and processes. If a company is pursuing a DevOps transformation, a monorepo can help accelerate change management when it comes to new workflows or the rollout of new tools.\n- Monorepos simplify dependency management because all packages can be updated in a single commit.\n- Monorepos offer unified CI/CD and build processes. Having all services in a single repository means that you can set up one system of pipelines for everyone.\n\nWhile we still have a ways to go before monorepos or monolithic repositories are as easy to manage as multi-repos in GitLab, we put together five tips and tricks to maintain velocity while developing on a monorepo in GitLab.\n\n**1. 
Use CODEOWNERS to streamline merge request approvals**\n\nCODEOWNERS files live in the repository and assign an owner to a portion of the code, making it super efficient to process changes. Investing time in setting up a robust [CODEOWNERS file](https://docs.gitlab.com/ee/user/project/codeowners/) that you can then use to automate [merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) from required people will save time down the road for developers. \n\nYou can then set your merge requests so they must be approved by Code Owners before merge. CODEOWNERS specified for the changed files in the merge request will be automatically notified.\n\n**2. Improve git operation performance with Git LFS**\n\nA universal truth of git is that managing large files is challenging. If you work in the gaming industry, I am sure you’ve been through the annoying process of trying to remove a binary file from the repository history after a well-meaning coworker committed it. This is where [Git LFS](https://docs.gitlab.com/ee/topics/git/lfs/#git-large-file-storage-lfs) comes in. Git LFS keeps all the big files in a different location so that they do not exponentially increase the size of a repository.\n\nThe GitLab server communicates with the Git LFS client over HTTPS. You can enable Git LFS for a project by toggling it in [project settings](https://docs.gitlab.com/ee/user/project/settings/index.html#configure-project-visibility-features-and-permissions). All files in Git LFS can be tracked in the GitLab interface. GitLab indicates what files are stored there with the LFS icon.\n\n**3. Reduce download time with partial clone operations**\n\n[Partial clone](https://docs.gitlab.com/ee/topics/git/partial_clone.html#partial-clone) is a performance optimization that allows Git to function without having a complete copy of the repository. 
The goal of this work is to allow Git to better handle extremely large repositories.\n\nAs we just talked about, storing large binary files in Git is normally discouraged, because every large file added is downloaded by everyone who clones or fetches changes thereafter. These downloads are slow and problematic, especially when working from a slow or unreliable internet connection.\n\nUsing partial clone with a file size filter solves this problem, by excluding troublesome large files from clones and fetches. \n\n**4. Take advantage of parent-child pipelines**\n\n[Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html) are where one pipeline triggers a set of downstream pipelines in the same project. The downstream pipelines still execute in the same stages or sequence without waiting for other pipelines to finish. Additionally, child pipelines reduce the configuration to the child pipeline, making it easier to interpret and understand. For monorepos, using parent-child pipelines in conjunction with `rules:changes` will only run pipelines on specified files changes. This reduces wasted time running pipelines across the entire repository.  \n\n**5. Use incremental backups to eliminate downtime** \n\n[Incremental backups](https://docs.gitlab.com/ee/raketasks/backup_restore.html#incremental-repository-backups) can be faster than full backups because they only pack changes since the last backup into the backup bundle for each repository. This is super useful when you are working on a large repository and only developing on certain parts of the code base at a time.\n\n## Where we are headed\n\nWhile these tips have helped many customers migrate from other version control systems to GitLab, we know there is still room for improvement. Over the next year, you will see us working on the following projects. 
We’d LOVE to hear from you, so share your thoughts, ideas, or simply 👍 on an issue to help prioritize things that will make your life easier.\n\n- [Git for enormous repositories](https://gitlab.com/groups/gitlab-org/-/epics/773)\n- [Expand SAST scanner support for monorepos](https://gitlab.com/groups/gitlab-org/-/epics/4895)\n- [Allow Reports to be Namespace to support monorepos](https://gitlab.com/gitlab-org/gitlab/-/issues/299490)\n",[722,834,1074,767,9],{"slug":3766,"featured":6,"template":699},"tips-for-managing-monorepos-in-gitlab","content:en-us:blog:tips-for-managing-monorepos-in-gitlab.yml","Tips For Managing Monorepos In Gitlab","en-us/blog/tips-for-managing-monorepos-in-gitlab.yml","en-us/blog/tips-for-managing-monorepos-in-gitlab",{"_path":3772,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3773,"content":3778,"config":3783,"_id":3785,"_type":13,"title":3786,"_source":15,"_file":3787,"_stem":3788,"_extension":18},"/en-us/blog/top-10-gitlab-hacks",{"title":3774,"description":3775,"ogTitle":3774,"ogDescription":3775,"noIndex":6,"ogImage":3680,"ogUrl":3776,"ogSiteName":685,"ogType":686,"canonicalUrls":3776,"schema":3777},"Top ten GitLab hacks for all stages of the DevOps Platform","Get the most out of the GitLab DevOps Platform with our ten best tips for enhanced productivity.","https://about.gitlab.com/blog/top-10-gitlab-hacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top ten GitLab hacks for all stages of the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-10-19\",\n      }",{"title":3774,"description":3775,"authors":3779,"heroImage":3680,"date":3780,"body":3781,"category":693,"tags":3782},[2491],"2021-10-19","It's been ten years since the first commit to GitLab, so we are sharing our\nten favorite GitLab hacks to help you get the most out of our DevOps\nPlatform. 
These are tips for all stages of the development lifecycle, so\nroll up your sleeves and let's get started.\n\n\n## Manage faster with quick actions\n\n\nYou might have adopted keyboard shortcuts for faster navigation and\nworkflows already - if not, check out the GitLab documentation for [platform\nspecific shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html). The\nknowledge of pressing `r` to land in the reply to comment in text form can\nbe combined with other quick actions, including:\n\n\n```\n\n/assign_reviewer @ \u003Csearch username>\n\n\n/label ~ \u003Csearch label>\n\n/label ~enhancement ~workflow::indev\n\n\n/due Oct 8\n\n\n/rebase\n\n\n/approve\n\n\n/merge \n\n```\n\n\nQuick actions are also helpful if you have to manage many issues, merge\nrequests and epics at the same time. There are specific actions which allow\nyou to duplicate existing issues, as one example. \n\n\nTake a deeper dive into [Quick\nActions](/blog/improve-your-gitlab-productivity-with-these-10-tips/). \n\n\n## Plan instructions with templates\n\n\nDon’t fall into the trap of back-and-forth with empty issue descriptions\nthat leave out details your development teams need to reproduce the error in\nthe best way possible. \n\n\nGitLab provides the possibility to use so-called [description\ntemplates](https://docs.gitlab.com/ee/user/project/description_templates.html)\nin issues and merge requests. Next to providing a structured template with\nheadings, you can also add [task\nlists](https://docs.gitlab.com/ee/user/markdown.html#task-lists) which can\nlater be ticked off by the assignee. Basically everything is possible and is\nsupported in GitLab-flavored markdown and HTML.\n\n\nIn addition to that, you can combine the static description templates with\nquick actions. This allows you to automatically set labels, assignees,\ndefine due dates, and more to level up your productivity with GitLab. 
\n\n\n```\n\n\u003C!-- \n\nThis is a comment, it will not be rendered by the Markdown engine. You can\nuse it to provide instructions how to fill in the template.\n\n--> \n\n\n### Summary \n\n\n\u003C!-- Summarize the bug encountered concisely. -->\n\n\n### Steps to reproduce\n\n\n\u003C!-- Describe how one can reproduce the issue - this is very important. -->\n\n\n### Output of checks\n\n\n\u003C!-- If you are reporting a bug on GitLab.com, write: This bug happens on\nGitLab.com -->\n\n\n#### Results of GitLab environment info\n\n\n\u003C!--  Input any relevant GitLab environment information if needed. -->\n\n\n\u003Cdetails>\n\n\u003Csummary>Expand for output related to app info\u003C/summary>\n\n\n\u003Cpre>\n\n\n(Paste the version details of your app here)\n\n\n\u003C/pre>\n\n\u003C/details>\n\n\n### Possible fixes\n\n\n\u003C!-- If you can, link to the line of code and suggest actions. →\n\n\n## Maintainer tasks\n\n\n- [ ] Problem reproduced\n\n- [ ] Weight added\n\n- [ ] Fix in test\n\n- [ ] Docs update needed\n\n\n/label ~\"type::bug\"\n\n```\n\n\nWhen you manage different types of templates, you can pass along the name of\nthe template in the `issuable_template` parameter, for example\n`https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20proposal%20%23%20lean`. 
\n\n\nAt GitLab, we use description and merge request templates in many ways:\n[GitLab the\nproject](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates),\n[GitLab Corporate Marketing\nteam](https://gitlab.com/gitlab-com/marketing/corporate_marketing/corporate-marketing/-/tree/master/.gitlab/issue_templates),\n[GitLab team member\nonboarding](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/tree/master/.gitlab/issue_templates)\nand [GitLab product\nteam](https://gitlab.com/gitlab-com/Product/-/tree/main/.gitlab/issue_templates)\nare just a few examples.\n\n\n## Create with confidence \n\n\nWhen reading GitLab issues and merge requests, you may see the abbreviation\n`MWPS` which means `Merge When Pipeline Succeeds`. This is an efficient way\nto merge the MRs when the pipeline passes all jobs and stages - you can even\ncombine this workflow with [automatically closing\nissues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)\nwith keywords from the MR.\n\n\n`Merge When Pipeline Succeeds` also works on the CLI with the `git` command\nand [push\noptions](https://docs.gitlab.com/ee/user/project/push_options.html). 
That\nway you can create a merge request from a local Git branch, and set it to\nmerge when the pipeline succeeds.\n\n\n```shell\n\n# mwps BRANCHNAME\n\nalias mwps='git push -u origin -o merge_request.create -o\nmerge_request.target=main -o merge_request.merge_when_pipeline_succeeds'\n\n```\n\n\nCheckout [this ZSH alias\nexample](https://gitlab.com/sytses/dotfiles/-/blob/745ef9725a859dd759059f6ce283e2a8132c9b00/git/aliases.zsh#L24)\nin our CEO [Sid Sijbrandij](/company/team/#sytses)’s dotfiles repository.\nThere are more push options available, and even more Git CLI tips in [our\ntools & tips\nhandbook](https://handbook.gitlab.com/handbook/tools-and-tips/#terminal).\nOne last tip: Delete all local branches where the remote branch was deleted,\nfor example after merging a MR.\n\n\n```shell\n\n# Delete all remote tracking Git branches where the upstream branch has been\ndeleted\n\nalias git_prune=\"git fetch --prune && git branch -vv | grep 'origin/.*:\ngone]' | awk '{print \\$1}' | xargs git branch -d\"\n\n```\n\n\nYou are not bound to your local CLI environment; take it to the cloud with\n[Gitpod](/blog/teams-gitpod-integration-gitlab-speed-up-development/)\nand either work in VS Code or the pod terminal. \n\n\n## Verify your CI/CD pipeline\n\n\nRemember the old workflow of committing a change to `.gitlab-ci.yml` just to\nsee if it was valid, or if the job template really inherits all the\nattributes? This has gotten a whole lot easier with our new [pipeline\neditor](https://docs.gitlab.com/ee/ci/pipeline_editor/). Navigate into the\n`CI/CD` menu and start building CI/CD pipelines right away.\n\n\nBut the editor is more than just another YAML editor. You’ll get live\nlinting, allowing you to know if there is a missing dash for array lists or\na wrong keyword in use before you commit. 
You can also preview jobs and\nstages or asynchronous dependencies with `needs` to make your pipelines more\nefficient.\n\n\nThe pipeline editor also uses uses the `/ci/lint` API endpoint, and fetches\nthe merged YAML configuration I described earlier in [this blog post about\njq and CI/CD\nlinting](/blog/devops-workflows-json-format-jq-ci-cd-lint/). That\nway you can quickly verify that job templates with\n[extends](https://docs.gitlab.com/ee/ci/yaml/#extends) and [!reference\ntags](https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html#reference-tags)\nwork in the way you designed them. It also allows you to unfold included\nfiles, and possible job overrides (for example changing the stage of an\n[included SAST security\ntemplate](https://docs.gitlab.com/ee/user/application_security/sast/#overriding-sast-jobs)).\n\n\nLet’s try a quick example – create a new project and new file called\n`server.c` with the following content: \n\n\n```\n\n#include \u003Cstdio.h>\n\n#include \u003Cstring.h>\n\n#include \u003Csys/mman.h>\n\n#include \u003Csys/stat.h>\n\n#include \u003Cunistd.h>\n\n\nint main(void) {\n    size_t pagesize = getpagesize();\n    char * region = mmap(\n        (void*) (pagesize * (1 \u003C\u003C 20)),\n        pagesize,\n        PROT_READ|PROT_WRITE|PROT_EXEC,\n        MAP_ANON|MAP_PRIVATE, 0, 0);\n\n    strcpy(region, \"Hello GitLab SAST!\");\n    printf(\"Contents of region: %s\\n\", region);\n\n    FILE *fp;\n    fp = fopen(\"devops.platform\", \"r\");\n    fprintf(fp, \"10 years of GitLab 🦊 🥳\");\n    fclose(fp);\n    chmod(\"devops.platform\", S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n\n```\n\n\nOpen the CI/CD pipeline editor and add the following configuration, with an\nextra `secure` stage assigned to the `semgrep-sast` job for SAST and the C\ncode. 
\n\n\n```yaml\n\nstages:\n    - build\n    - secure\n    - test\n    - deploy\n\ninclude:\n    - template: Security/SAST.gitlab-ci.yml\n\nsemgrep-sast:\n    stage: secure\n```\n\n\nInspect the `Merged YAML tab` to see the fully compiled CI/CD configuration.\nYou can commit the changes and check the found vulnerabilities too as an\nasync practice :). The examples are available in [this\nproject](https://gitlab.com/gitlab-de/playground/sast-10y-example).\n\n\n![CI/CD Pipeline editor - Merged\nYAML](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_pipeline_editor_view_merged_yaml.png)\n\nVerify the stage attribute for the job by opening the `view merged YAML` tab\nin the CI/CD pipeline editor.\n\n{: .note.text-center}\n\n\n## Package your applications\n\n\nThe [package registry](https://docs.gitlab.com/ee/user/packages/)\npossibilities are huge and there are more languages and package managers to\ncome. Describing why Terraform, Helm, and containers (for infrastructure)\nand Maven, npm, NuGet, PyPI, Composer, Conan, Debian, Go and Ruby Gems (for\napplications) are so awesome would take too long, but it's clear there are\nplenty of choices. \n\n\nOne of my favourite workflows is to use existing CI/CD templates to publish\ncontainer images in the GitLab container registry. This makes continuous\ndelivery much more efficient, such as when deploying the application into\nyour Kubernetes cluster or AWS instances. 
\n\n\n```yaml\n\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n```\n\n\nIn addition to including the CI/CD template, you can also override the job\nattributes and define a specific stage and manual non-blocking rules.\n\n\n```yaml\n\nstages:\n  - build\n  - docker-build\n  - test\n\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n\n# Change Docker build to manual non-blocking\n\ndocker-build:\n  stage: docker-build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual \n      allow_failure: true\n```\n\n\nFor celebrating #10YearsOfGitLab, we have created a [C++\nexample](https://gitlab.com/gitlab-de/cicd-tanuki-cpp) with an Easter egg on\ntime calculations. This project also uses a Docker builder image to showcase\na more efficient pipeline. Our recommendation is to learn using the\ntemplates in a test repository, and then create a dedicated group/project\nfor managing all required container images. You can think of builder images\nwhich include the compiler tool chain, or specific scripts to run end-to-end\ntests, etc. \n\n\n## Secure your secrets\n\n\nIt is easy to leak a secret by making choices that uncomplicate a unit test\nby running it directly with the production database. The secret persists in\ngit history, and someone with bad intentions gains access to private data,\nor finds ways to exploit your supply chain even further. \n\n\nTo help prevent that, include the CI/CD template for secret detection. \n\n\n```yaml\n\nstages:\n    - test\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml  \n```\n\n\nA known way to leak secrets is committing the `.env` file which stores\nsettings and secrets in the repository. Try the following snippet by adding\na new file `.env` and create a merge request.\n\n\n```\n\nexport AWS_KEY=\"AKIA1318109798ABCDEF\"\n\n```\n\n\nInspect the reports JSON to see the raw reports structure. 
GitLab Ultimate\nprovides an MR integration, a security dashboard overview, and more features\nto take immediate action. The example can be found in [this\nproject](https://gitlab.com/gitlab-de/playground/secret-scanning-10y-example).\n\n\n![Secrets Scanning in\nMR](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_secrets_scanning.png)\n\nMR detail view with detected AWS secret from security scanning\n\n{: .note.text-center}\n\n\n## Release and continuously deliver (CD)\n\n\nGitLab’s release stage provides many\n[features](https://about.gitlab.com/handbook/product/categories/features/#release),\nincluding [canary\ndeployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html)\nand [GitLab pages](https://docs.gitlab.com/ee/user/project/pages/). There\nare also infrastructure deployments with Terraform and cloud native\n(protected) [environments](https://docs.gitlab.com/ee/ci/environments/). \n\n\nWhile working on a CI/CD pipeline efficiency workshop, I got enthusiastic\nabout [parent-child\npipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines)\nallowing non-blocking child pipelines into production, with micro services\nin Kubernetes as one example. \n\n\nLet’s try it! Create a new project, and add 2 child pipeline configuration\nfiles: `child-deploy-staging.yml` and `child-deploy-prod.yml`. The naming is\nimportant as the files will be referenced in the main `.gitlab-ci.yml`\nconfiguration file later. The jobs in the child pipelines will sleep for 60\nseconds to simulate a deployment. 
\n\n\nchild-deploy-staging.yml:\n\n\n```yaml\n\ndeploy-staging:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to staging\" && sleep 60\n```\n\n\nchild-deploy-prod.yml\n\n\n```yaml\n\ndeploy-prod:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to prod\" && sleep 60\n\nmonitor-prod:\n    stage: deploy\n    script:\n        - echo \"Monitoring production SLOs\" && sleep 60\n```\n\n\nNow edit the `.gitlab-ci.yml` configuration file and create a\nbuild-test-deploy stage workflow.\n\n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: echo \"Build\"\n\ntest:\n  stage: test \n  script: echo \"Test\"\n\ndeploy-staging-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-staging.yml\n  #rules:\n  #  - if: $CI_MERGE_REQUEST_ID\n\ndeploy-prod-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-prod.yml\n    #strategy: depend\n  #rules:\n  #  - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH   \n```\n\n\nCommit the changes and inspect the CI/CD pipelines. \n\n\n![Parent-child\nPipelines](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_parent_child_pipelines.png)\n\nView parent-child pipelines in GitLab\n\n{: .note.text-center}\n\n\n`strategy: depends` allows you to make the child pipelines blocking again,\nand the parent child pipeline waits again. Try uncommenting this for the\nprod job, and verify that by inspecting the pipeline view.\n[Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) allow refining the scope\nwhen jobs are being run, such as when staging child pipelines that should\nonly be run in merge requests and the prod child pipeline only gets\ntriggered when on the default main branch. 
The full example can be found in\n[this\nproject](https://gitlab.com/gitlab-de/playground/parent-child-pipeline-10y-example).\n\n\nTip: You can use\n[resource_groups](/blog/introducing-resource-groups/) to limit\nproduction deployments from running concurrent child pipelines. \n\n\n## Configure your infrastructure\n\n\nTerraform allows you to describe, plan and apply the provisioning of\ninfrastructure resources. The workflow requires a state file to be stored\nover steps, where the [managed state in\nGitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html)\nas an HTTP backend is a great help, together with predefined container\nimages and CI/CD templates to make [Infrastructure as\ncode](https://docs.gitlab.com/ee/user/infrastructure/iac/) as smooth as\npossible.\n\n\nYou can customize the template, or copy the CI/CD configuration into\n.gitlab-ci.yml and modify the steps by yourself. Let’s try a quick example\nwith only an AWS account and an IAM user key pair. Configure them as CI/CD\nvariables in `Settings > CI/CD > Variables`: `AWS_ACCESS_KEY_ID` and\n`AWS_SECRET_ACCESS_KEY`.\n\n\nNext, create the `backend.tf` file and specify the http backend and AWS\nmodule dependency.\n\n\n```terraform\n\nterraform {\n  backend \"http\" {\n  }\n\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 3.0\"\n    }\n  }\n}\n\n```\n\n\nCreate `provider.tf` to specify the AWS region.\n\n\n```terraform\n\nprovider \"aws\" {\n  region = \"us-east-1\"\n}\n\n```\n\n\nThe `main.tf` describes the S3 bucket resources.\n\n\n```terraform\n\nresource \"aws_s3_bucket_public_access_block\" \"publicaccess\" {\n  bucket = aws_s3_bucket.demobucket.id\n  block_public_acls = false\n  block_public_policy = false\n}\n\n\nresource \"aws_s3_bucket\" \"demobucket\" {\n  bucket = \"terraformdemobucket\"\n  acl = \"private\"\n}\n\n```\n\n\nTip: You can verify the configuration locally on your CLI by commenting out\nthe HTTP backend 
above.\n\n\nFor GitLab CI/CD, open the pipeline editor and use the following\nconfiguration: (Note that it is important to specify the `TF_ROOT` and\n`TF_ADDRESS` variables since you can [manage multiple Terraform state\nfiles](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html#configure-the-backend)). \n\n\n```yaml\n\nvariables:\n  TF_ROOT: ${CI_PROJECT_DIR}\n  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}\n\ninclude:\n    - template: Terraform.latest.gitlab-ci.yml\n\nstages:\n  - init\n  - validate\n  - build\n  - deploy\n  - cleanup\n\ndestroy:\n    stage: cleanup\n    extends: .terraform:destroy \n    when: manual\n    allow_failure: true\n```\n\n\nCommit the configuration and inspect the pipeline jobs. \n\n\n![Terraform pipeline AWS S3\nbucket](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_pipeline_aws_s3_bucket.png)\n\nAWS S3 bucket provisioned with Terraform in GitLab CI/CD \n\n{: .note.text-center}\n\n\nThe `destroy` job is not created in the template and therefore explicitly\nadded as a manual job. It is recommended to review the opinionated Terraform\nCI/CD template and copy the jobs into your own configuration to allow for\nfurther modifications or style adjustments.  The full example is located in\n[this\nproject](https://gitlab.com/gitlab-de/playground/terraform-aws-state-10y-example).\n\n\n![GitLab managed Terraform\nstates](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_terraform_state_cicd_overview.png)\n\nView the Terraform states in GitLab\n\n{: .note.text-center}\n\n\nHat tipping to our Package stage - you can manage and publish [Terraform\nmodules in the\nregistry](https://docs.gitlab.com/ee/user/packages/terraform_module_registry/)\ntoo, using all of the DevOps Platform advantages. And hot off the press, the\n[GitLab Kubernetes Operator is generally\navailable](/blog/open-shift-ga/). 
\n\n\n## Monitor GitLab and dive into Prometheus\n\n\nPrometheus is a monitoring solution which collects metrics from `/metrics`\nHTTP endpoints made available by applications, as well as so-called\nexporters to serve services and host information in the specified metrics\nformat. One example is CI/CD pipeline insights to analyse bottlenecks and\n[make your pipelines more\nefficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html).\nThe [GitLab CI Pipeline Exporter\nproject](https://github.com/mvisonneau/gitlab-ci-pipelines-exporter/tree/main/examples/quickstart)\nhas a great quick start in under 5 minutes, bringing up demo setup with\nDocker-compose, Prometheus and Grafana. From there, it is not far into your\nproduction monitoring environment, and monitoring more of GitLab. \n\n\n![GitLab CI\nExporter](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_ci_pipeline_exporter_prometheus.png)\n\nExample dashboard for the GitLab CI Pipeline Exporter\n\n{: .note.text-center}\n\n\nThe Prometheus Exporter uses the [Go client\nlibraries](https://prometheus.io/docs/instrumenting/writing_exporters/).\nThey can be used to write your own exporter, or instrument your application\ncode to expose `/metrics`. When deployed, you can use Prometheus again to\nmonitor the performance of your applications in Kubernetes, as one example.\nFind more monitoring ideas in my talk “[From Monitoring to Observability:\nLeft Shift your\nSLOs](https://docs.google.com/presentation/d/1LPb-HPMgbc8_l98VjMEo5d0uYlnNnAtJSURngZPWDdE/edit)”. \n\n\n## Protect\n\n\nYou can enable security features in GitLab by including the CI/CD templates\none by one. 
A more easy way is to enable [Auto\nDevOps](https://docs.gitlab.com/ee/topics/autodevops/) and use the default\nbest practices for [security\nscans](https://docs.gitlab.com/ee/user/application_security/index.html#security-scanning-with-auto-devops).\nThis includes [container\nscanning](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-container-scanning)\nensuring that application deployments are not vulnerable on the container OS\nlevel. \n\n\nLet’s try a quick example with a potentially vulnerable image, and the\nDocker template tip from the Package stage above. Create a new `Dockerfile`\nin a new project:\n\n\n```yaml\n\nFROM debian:10.0 \n\n```\n\n\nOpen the pipeline editor and add the following CI/CD configuration:\n\n\n```yaml\n\n# 1. Automatically build the Docker image\n\n# 2. Run container scanning.\nhttps://docs.gitlab.com/ee/user/application_security/container_scanning/index.html\n\n# 3. Inspect `Security & Compliance > Security Dashboard`\n\n\n# For demo purposes, scan the latest tagged image from 'main'\n\nvariables:\n    DOCKER_IMAGE: $CI_REGISTRY_IMAGE:latest    \n\ninclude:\n    - template: Docker.gitlab-ci.yml\n    - template: Security/Container-Scanning.gitlab-ci.yml\n```\n\n\nThe full example is located in [this\nproject](https://gitlab.com/gitlab-de/playground/container-scanning-10y-example).\n\n\nTip: Learn more about [scanning container images in a deployed Kubernetes\ncluster](https://docs.gitlab.com/ee/user/application_security/container_scanning/)\nto stay even more safe. 
\n\n\n![Container Scanning Vulnerability\nReport](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_container_scanning_vulnerability_report.png)\n\nView the container scanning vulnerability report\n\n{: .note.text-center}\n\n\n## What’s next?\n\n\nWe have tried to find a great “hack” for each stage of the DevOps lifecycle.\nThere are more hacks and hidden gems inside GitLab - share yours and be\nready to explore more stages of the DevOps Platform.\n\n\nCover image by [Alin Andersen](https://unsplash.com/photos/diUGN5N5Rrs) on\n[Unsplash](https://unsplash.com)\n",[1035,722,9],{"slug":3784,"featured":6,"template":699},"top-10-gitlab-hacks","content:en-us:blog:top-10-gitlab-hacks.yml","Top 10 Gitlab Hacks","en-us/blog/top-10-gitlab-hacks.yml","en-us/blog/top-10-gitlab-hacks",{"_path":3790,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3791,"content":3797,"config":3802,"_id":3804,"_type":13,"title":3805,"_source":15,"_file":3806,"_stem":3807,"_extension":18},"/en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"title":3792,"description":3793,"ogTitle":3792,"ogDescription":3793,"noIndex":6,"ogImage":3794,"ogUrl":3795,"ogSiteName":685,"ogType":686,"canonicalUrls":3795,"schema":3796},"Top 10 GitLab workflow hacks you need to know","A GitLab product manager shares her favorite tricks to navigate quickly and efficiently around the GitLab DevSecOps Platform and to boost team collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099361/Blog/Hero%20Images/Blog/Hero%20Images/lightvisibility_lightvisibility.png_1750099361252.png","https://about.gitlab.com/blog/top-10-gitlab-workflow-hacks-you-need-to-know","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab workflow hacks you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2024-04-09\",\n      
}",{"title":3792,"description":3793,"authors":3798,"heroImage":3794,"date":3799,"body":3800,"category":693,"tags":3801},[1880],"2024-04-09","In the world of software development, efficiency isn't just about moving fast – it's about smart navigation. As a GitLab product manager, I truly understand the value of efficiency when working within the DevSecOps platform. These are my top 10 favorite GitLab features and they might be the workflow hacks you never knew you needed.\n\nLet's dive into these hidden gems to unlock a new level of productivity and collaboration within your team.\n\n## 1. Resolve comments\n\nNot just for merge requests! Resolving comments on issues can significantly reduce noise and streamline task management. It's particularly handy for managing feedback efficiently.\n\n> **Why do I love it?** Not only does resolving comments reduce the noise on an issue, but it’s also a great way to manage tasks.\n>\n> **Use case.** Resolving comments is a great tool for issues where you are collecting feedback – respond to the feedback and provide a link, resolve the comment, and move on to the next one.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#resolve-a-thread)__\n\n![example of resolve comments - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099376147.gif)\n\n\u003Cp>\u003C/p>\n\n## 2. Internal comments\n\nSpeak directly to your team without an external audience. Keep discussions private within an issue or merge request with comments visible only to your team members. It's the perfect balance between transparency and privacy.\n\n> **Why do I love it?** It balances privacy with transparency, while keeping the broader discussion open for the community.\n>\n> **Use case.** When coordinating a product launch, your marketing team can use internal comments to discuss and refine messaging and strategy. 
This keeps your discussions centralized and easily accessible to the team while in draft mode.\n>\n> **[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#add-an-internal-note)**\n\n![internal comments example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099376148.png)\n\n\u003Cp>\u003C/p>\n\n## 3. And/or in filters\n\nWhen searching records on a listing page, using and/or filters can help you slice through the noise and find exactly what you're looking for quickly and efficiently.\n\n> **Why do I love it?** Perfect for finding exactly what you need, powering efficient and streamlined workflows.\n>\n>**Use case.** Search for feature issues related to a specific initiative that are assigned to specific groups.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#filter-with-the-or-operator)__\n\n![and/or filter example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/and_or__1__aHR0cHM6_1750099376152.gif)\n\n\u003Cp>\u003C/p>\n\n## 4. Auto expand URLs\n\nAppending '+' or '+s' to the end of a GitLab URL transforms it into an informative snippet, allowing you to share progress without forcing your teammates to leave the page.\n\n> **Why do I love it?** It's like having x-ray vision for URLs – see the important stuff without even clicking!\n>\n> **Use case.** Sharing progress in comments? Just add '+s' to the link, and boom – everyone's instantly on the same page.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/markdown.html#show-the-issue-merge-request-or-epic-title-in-the-reference)__\n\n![auto expand URLs example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099376154.gif)\n\n\u003Cp>\u003C/p>\n\n## 5. 
Quick actions\n\nWith simple text commands, quick actions let you perform tasks like assigning users, adding labels, and more, directly from the description or comment box, saving you clicks and time.\n\n> **Why do I love it?** Saves clicks and time.\n>\n> **Use case.** When creating a new issue I use quick actions to automatically add labels, a milestone, and connect to the epic upon saving the record.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/quick_actions.html)__\n\n![quick actions example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099376156.gif)\n\n\u003Cp>\u003C/p>\n\n## 6. Bulk edit\n\nApply labels, change assignees, or update milestones for multiple issues at once. This feature turns potentially tedious updates into a breeze, allowing for quick adjustments across numerous issues.\n\n> **Why do I love it?** Because it turns tedious updates into quick updates!\n>\n> **Use case.** Need to tag the whole sprint's issues as Review needed? Just filter, select all, and add that label in bulk – easy peasy.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#bulk-edit-issues-from-a-project)__\n\n![bulk edit example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099376157.gif)\n\n\u003Cp>\u003C/p>\n\n## 7. Epic swimlanes\n\nGroup issues under epics on your board to visually track and discuss progress. 
It's a powerful way to contextualize work during reviews or standups.\n\n> **Why do I love it?** Easily understand the context of work as you’re walking the board.\n>\n> **Use case.** Group by epic during standup reviews to easily piece together work with its parent initiative.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issue_board.html#group-issues-in-swimlanes)__\n\n![epic swimlanes example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099376158.gif)\n\n\u003Cp>\u003C/p>\n\n## 8. Wiki diagrams\n\nIllustrate ideas and workflows directly in your wiki pages with easy-to-create diagrams. This feature supports visual learning and simplifies complex concepts.\n\n> **Why do I love it?** It’s incredibly user-friendly and flexible.\n>\n> **Use case.** When outlining a new feature workflow, draw it directly in the wiki page, making it crystal clear for everyone on the team.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/administration/integration/diagrams_net.html)__\n\n![wiki diagrams example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099376159.gif)\n\n\u003Cp>\u003C/p>\n\n## 9. Table creation\n\nForget about wrestling with markdown for table creation. The rich text editor lets you effortlessly insert and format tables, making documentation cleaner and more structured.\n\n> **Why do I love it?** It turns the table creation ordeal into a breeze, making updates clean and structured with just a few clicks.\n>\n> **Use case.** Compiling a sprint retro? 
Quickly insert a table to organize feedback, action items, and owners, making the review process smoother for everyone.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/rich_text_editor.html#tables)__\n\n![table creation example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099376160.gif)\n\n\u003Cp>\u003C/p>\n\n## 10. Video and GIF embeds\n\nEnhance your issues and epic descriptions or comments with embedded GIFs and YouTube videos, adding a dynamic layer to your communication.\n\n> **Why do I love it?** Sometimes a GIF or video speaks better than words.\n>\n> **Use case.** Trying to explain a UI bug? Embed a YouTube video for a quick walkthrough of the proposed feature enhancement.\n\n![video and gif embed example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/gif__1__aHR0cHM6_1750099376161.gif)\n\n\u003Cp>\u003C/p>\n\n## Explore these features\n\nThese features represent just the tip of the iceberg in GitLab's comprehensive toolkit designed to boost efficiency and foster better collaboration. While they may be underutilized, their impact on your workflow could be substantial. I encourage you to explore these features further and integrate them into your daily routines.\n\n> Are you excited to power your DevSecOps workflow using GitLab? 
[Try GitLab Ultimate for free for 30 days](https://gitlab.com/-/trial_registrations/new).\n",[1035,495,834,9],{"slug":3803,"featured":6,"template":699},"top-10-gitlab-workflow-hacks-you-need-to-know","content:en-us:blog:top-10-gitlab-workflow-hacks-you-need-to-know.yml","Top 10 Gitlab Workflow Hacks You Need To Know","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know.yml","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"_path":3809,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3810,"content":3816,"config":3821,"_id":3823,"_type":13,"title":3824,"_source":15,"_file":3825,"_stem":3826,"_extension":18},"/en-us/blog/top-five-takeaways-from-the-developer-survey",{"title":3811,"description":3812,"ogTitle":3811,"ogDescription":3812,"noIndex":6,"ogImage":3813,"ogUrl":3814,"ogSiteName":685,"ogType":686,"canonicalUrls":3814,"schema":3815},"Top 5 takeaways from the 2018 Developer Survey","GitLab's director of product marketing discusses the challenges facing DevOps adoption and other key findings from our 2018 Developer Survey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680105/Blog/Hero%20Images/top-five-takeaways-blog-image.jpg","https://about.gitlab.com/blog/top-five-takeaways-from-the-developer-survey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 5 takeaways from the 2018 Developer Survey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-05-14\",\n      }",{"title":3811,"description":3812,"authors":3817,"heroImage":3813,"date":3818,"body":3819,"category":718,"tags":3820},[1680],"2018-05-14","\n_Our [2022 Global DevSecOps Survey](/developer-survey/) has the latest insights from over 5,000 DevOps professionals._\n\nWhile the merits of cross-functional workflows are becoming more accepted in the software development space, it still has quite a way to go. 
In fact, [GitLab’s survey of 5,000 software professionals](/developer-survey/previous/2018/) found that only 23 percent of respondents are working with a DevOps workflow.\n\nThis is one of five top takeaways from the annual report on software development trends and the impact continuous integration and automation have on the way IT teams work.\n\n- [What’s in the webcast](#whats-in-the-webcast)\n- [Watch the recording](#watch-the-recording)\n- [Top takeaways](#top-takeaways)\n\n## What’s in the webcast\n\nThe discussion kicks off with the differing outlooks managers and developers have on DevOps adoption and the source of bottlenecks in the development process. We move on to highlight the distinctions between high- and low-performing teams and the role open source tools have in software development. The discussion then delves into the way continuous integration helps teams get working code out of the door faster.\n\n## Watch the recording\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/7hgoeV6LcFo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Top takeaways\n\n### Managers are more optimistic about their DevOps adoption progress than developers\n\n>Companies tend to look at DevOps as the next transformational methodology that's going to solve all software delivery problems, and of course there's a lot of truth in that when done really well. What we're finding is when you actually go and survey these organizations, managers and the management layer seem to have a more optimistic view of how they are progressing and what they can do with it. And though developers find the promise in it, they tend to agree less with the optimism of management. 
From our perspective, that makes a lot of sense because developers are in the trenches tooling, retooling, trying to configure, making that CD pipeline work, always kind of running into different roadblocks and trying to solve that all the time. So, although they're excited, I think their viewpoint is not necessarily as rosy about it when compared to management.\n\n### Developers say most delays in the development process are in the testing phase, while managers say the majority of bottlenecks are attributed to the planning process\n\n>Everybody acknowledges that there are bottlenecks and delays in this development pipeline. When doing DevOps, you still get stuck. But where they actually encounter these delays and bottlenecks varied from team to team. The majority of this was in testing, the next one was planning. Development, operations, and practitioner teams actually found most of the bottlenecks and delays in their actual phases of work, whether this was testing the plan to production, etcetera. Management was found to be more frustrated and concerned about the planning phase of getting things kick started. - Ashish Kuthiala\n\n>Fifty-two percent of people say that testing is where they encounter the most delays. I don't think that's a number to be taken lightly. This is why continuous testing, automated testing is such a big piece of the DevOps software development lifecycle. If that's the single biggest cause for delay and we can automate more of that testing, the time it takes has got to come down. - Alan Shimel\n\n### Open source tools play an integral role in the software development process\n\n>We're finding that open source tools are becoming a very critical component that developers choose to help solve their problems. People are starting to look at tools that they can integrate with their stack and modify or contribute to; and they want to be recognized as well. 
So they're starting to turn to tools that are malleable, tools that they can use and understand what's underneath the hood. There's a good community around open source because as developers face problems, they can ask their peers for help and also help others. - Ashish Kuthiala\n\n### Teams that self-identify as high performing do DevOps well\n\n>Teams that move fast work on smaller pieces of code and get them out of production quickly, i.e. they do DevOps well and they assess themselves as higher performing teams ...\nFor these teams that do well, we found that removing roadblocks in the development process starts with continuous integration. If you are doing continuous integration well and automating that portion of the lifecycle along with others, it makes a huge impact in removing bottlenecks. You have to ship and get the code or the configuration change production ready right away. The more you wait, the more it piles it up and the harder it becomes. - Ashish Kuthiala\n\nPhoto by [Caspar Rubin](https://unsplash.com/photos/fPkvU7RDmCo) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[722,720,1137,9,696],{"slug":3822,"featured":6,"template":699},"top-five-takeaways-from-the-developer-survey","content:en-us:blog:top-five-takeaways-from-the-developer-survey.yml","Top Five Takeaways From The Developer Survey","en-us/blog/top-five-takeaways-from-the-developer-survey.yml","en-us/blog/top-five-takeaways-from-the-developer-survey",{"_path":3828,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3829,"content":3835,"config":3840,"_id":3842,"_type":13,"title":3843,"_source":15,"_file":3844,"_stem":3845,"_extension":18},"/en-us/blog/trends-in-test-automation",{"title":3830,"description":3831,"ogTitle":3830,"ogDescription":3831,"noIndex":6,"ogImage":3832,"ogUrl":3833,"ogSiteName":685,"ogType":686,"canonicalUrls":3833,"schema":3834},"3 Trends in test automation","Faster deployments, fewer bugs, better user experiences – see the latest trends in test automation and what 
they're bringing to the table.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663662/Blog/Hero%20Images/trends-in-test-automation.jpg","https://about.gitlab.com/blog/trends-in-test-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Trends in test automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-01\",\n      }",{"title":3830,"description":3831,"authors":3836,"heroImage":3832,"date":3837,"body":3838,"category":718,"tags":3839},[1113],"2019-05-01","\nAutomation is becoming a powerful tool in every industry.\nWith the pace of development at breakneck speed, [test automation](/topics/devops/devops-test-automation/) is a big asset in deploying applications quickly.\nThe volume and complexity of testing environments mean that machines are well-suited for the job, and a modern QA strategy is all about leveraging that automation effectively.\n\n[QASymphony recently surveyed testers and QA leaders](https://www.qasymphony.com/blog/test-automation-trends-infographic/) at mid-size and large enterprises and found that a significant number of respondents expect to be making a big leap towards test automation in the next year:\nAlmost half expect to be automating more than 50 percent in that time.\nThe test automation tool landscape is growing more complex, and 83 percent of organizations are using open source tools.\n\n## 1. 
Continuous testing\n\nIn traditional environments, testing gets completed at the end of a development cycle.\nAs more teams move toward a [DevOps](/topics/devops/) and [continuous delivery](/topics/ci-cd/) model in which software is constantly in development, leaving testing until the end can be a huge liability.\nIn the time between a project starting and going to testing, master files could have been changed thousands of times.\nWho knows what kinds of bugs can pop up over months of development?\nThis leads to either updates stuck in testing for far too long or deployments filled with bugs – neither of which is good.\nThat’s where continuous testing comes in.\n\nContinuous testing starts at the beginning.\nEach milestone along the way serves as a quality gate, [baking in excellence at each stage of the software development process](https://techbeacon.com/app-dev-testing/state-test-automation-7-key-trends-watch).\nAs each phase clears, more testing happens as needed.\nImplementing continuous testing methodologies is _already_ the biggest trend in test automation, but some organizations that embark on their DevOps journeys struggle with it.\n\nSubu Baskaran, senior product manager for Sencha, says that despite the desire to test early in the cycle, software development teams that are still maintaining legacy applications find it hard to go back and write unit or end-to-end tests:\n\n>\"The millions of lines of code make it extremely difficult for teams to think about unit testing, as that will severely hamper new feature development. Also, legacy applications have inherent complexities that make end-to-end testing very slow, vague, and brittle. [Hence, teams that maintain legacy applications resort to manual testing.](https://techbeacon.com/app-dev-testing/state-continuous-testing-its-journey-not-destination)\"\n\n## 2. 
Concurrent DevOps\n\nCode quality and speed go hand in hand, and teams must be able to make use of parallelization to keep up the pace.\nSplitting work across multiple servers has never been easier, and organizations will continue to expand their concurrent DevOps approach.\n\nYou could have multiple physical machines to handle the load but [VMs can be a more economical option for automation parallelization](https://techbeacon.com/app-dev-testing/parallelizing-test-automation-read-first).\nWhether those VMs are on premises or cloud-based largely depends on the cost and your company's ability to embrace the cloud.\n\nYou could also work with cloud partners, companies that host cloud-based execution environments\nfor testing and automation.\n\nAutoscaling is one way that teams can reduce the costs associated with running these concurrent jobs.\n[Autoscaling runners](/releases/2016/03/29/gitlab-runner-1-1-released/) split this work across multiple servers and spin up or down automatically to process queues – so developers don’t have to wait on builds and teams use as much capacity as needed.\nThis user [built out a CI testing pipeline using GitLab](https://medium.freecodecamp.org/4-steps-to-build-an-automated-testing-pipeline-with-gitlab-ci-24ccab95535e) that allowed for more effective bug catching, and more DevOps teams will be using these methods to automate their testing environments in years to come.\n\n## 3. 
AI and machine learning\n\nAt its core, machine learning is a pattern-recognition technology, [the main purpose of which is to make machines learn without being explicitly programmed](https://hackernoon.com/why-ai-ml-will-shake-software-testing-up-in-2019-b3f86a30bcfa).\nWhat makes this such an important trend in test automation is that it can make testing more predictive and reliable.\nWhile Selenium is still the standard for creating testing scripts, it requires a high level of programming skill to maintain.\nAutomation tools like Mabl, [TestCraft](https://www.testcraft.io/), Testim.io, and AutonomiQ are just some of the few incorporating AI and machine learning into test automation.\n\nDan Belcher, co-founder of testing tool company Mabl, and his team [developed an ML testing algorithm that can adapt to changes in frontend elements](https://techbeacon.com/app-dev-testing/how-ai-changing-test-automation-5-examples).\n\"Although Selenium is the most broadly used framework, the challenge with it is that it's pretty rigidly tied to the specific elements on the front end. Because of this, script flakiness can often arise when you make what seems like a pretty innocent change to a UI.\" he explains.\n\"One of the things that we did at the very beginning of creating Mabl was to develop a much smarter way of referring to frontend elements in our test automation so that those types of changes don't actually break your tests.\"\n\nAI and machine learning make it possible to go through millions of lines of code and identify patterns.\nBut what happens to the human testers? 
QA automation means that testers can devote more time to superior user experiences – the tasks that machines are _not_ always well-suited for.\nThe role of testers is now [ensuring that quality testing processes are being followed](https://www.qasymphony.com/blog/managing-testing-teams/), so it’s more about oversight than conducting actual tests.\nModern QA can be that bridge for beautiful user experiences that are intuitive and appealing.\nWith the volume of applications being deployed every day, having a great user experience is a way to stand out in a sea of apps.\n\n## These trends in test automation are just the tip of the iceberg\n\nThere is no shortage of exciting things happening: more focus on JavaScript testing, improvements in testing across devices, comprehensive testing dashboards, as well as Selenium-free options.\nThe testing automation landscape is full of new solutions, but none of them is viable in an outdated legacy environment.\n\nManual testing reduces application development speed and threatens code quality.\nThese two disadvantages are growth killers, especially in such a competitive development landscape.\nTest automation makes it possible for testers to use their skills where they add more business value: Creating great user experiences.\nLegacy applications can’t tap into all of these test automation capabilities because they aren’t supported.\nOrganizations forced to manually test their code are being left in the dust by those who automate.\n\nThe advantage of using a solution like GitLab is that we can incorporate a variety of continuous testing solutions.\nCustomers have integrated us with SaaS-based testing solutions or even their own homegrown Selenium grids.\nWe also integrate with JavaScript platforms like Cypress.io, and help teams create continuous integration pipelines.\n\nAre you ready to explore these trends in test automation but legacy applications are holding you back?\n\n[Just 
commit.](/blog/application-modernization-best-practices/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Mimi Thian](https://unsplash.com/photos/ZKBzlifgkgw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/%22developers%22?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[722,108,9],{"slug":3841,"featured":6,"template":699},"trends-in-test-automation","content:en-us:blog:trends-in-test-automation.yml","Trends In Test Automation","en-us/blog/trends-in-test-automation.yml","en-us/blog/trends-in-test-automation",{"_path":3847,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3848,"content":3853,"config":3860,"_id":3862,"_type":13,"title":3863,"_source":15,"_file":3864,"_stem":3865,"_extension":18},"/en-us/blog/triage-issues-gitmate",{"title":3849,"description":3850,"ogTitle":3849,"ogDescription":3850,"noIndex":6,"ogImage":2466,"ogUrl":3851,"ogSiteName":685,"ogType":686,"canonicalUrls":3851,"schema":3852},"Triage issues in 7 simple steps","Guest authors Lasse Shuirmann and Sebastian Latacz walk us through how to work through your issue backlog and triage effectively.","https://about.gitlab.com/blog/triage-issues-gitmate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Triage issues in 7 simple steps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sebastian Latacz\"},{\"@type\":\"Person\",\"name\":\"Lasse Schuirmann\"}],\n        \"datePublished\": \"2017-10-26\",\n      }",{"title":3849,"description":3850,"authors":3854,"heroImage":2466,"date":3857,"body":3858,"category":811,"tags":3859},[3855,3856],"Sebastian Latacz","Lasse Schuirmann","2017-10-26","\n\nActively triaging issues is crucial for keeping an overview on your repository, yet it’s tedious and takes up valuable developer hours. 
That’s why we summarized seven simple steps to help you triage efficiently.\n\n\u003C!-- more -->\n\n## Preparation\n\nThere are three types of issues: questions, bug reports, and feature requests. Define which you want to tackle in your tracker and which you handle elsewhere (you can use different [GitLab Issue Boards](/stages-devops-lifecycle/issueboard/) to help keep different types of issues together). Once this has been done, check each issue with the following scheme:\n\n## 1. Filter noise\n\nCheck whether the issue is the type you want in your tracker (as defined in the preparation phase). If not, point the user to the right place or move it to the relevant [issue board](/stages-devops-lifecycle/issueboard/) yourself. For example, indicate that questions will be answered on Stack Overflow or feature requests are best being posted for discussion in the Slack channel. Be friendly; remember the user just provided valuable feedback. Close the issue once you’ve pointed the user to the right place.\n\n## 2. Look for similars\n\nOftentimes work related to existing issues already exists. Searching your issue tracker for related keywords can bring up a lot of similar issues that can be helpful. Reference the existing issues in the new one.\n\n## 3. Look for duplicates\n\nWhile you are researching similar topics you might find or remember duplicate issues as well – in that case simply close those (or the new issue) and streamline your efforts in one place.\n\n## 4. Retrieve missing information\n\nIssues are often reported incomplete; critical information like a version number is not given and it turns out that a bug occurred in an unsupported version – ask people for missing information and close issues if that is not provided.\n\n## 5. Label\n\nLabel issues so you can find those which are relevant for a particular topic with a filter. Also set labels for states of an issue. 
For example, putting a `needs-info` label on an issue prevents other people from wasting their time on it.\n\n## 6. Ping related devs\n\nEspecially for bigger changes or if it's not obvious how to tackle an issue, you will want to cc developers who are knowledgeable in the area. This can prevent you from running against three walls after each other and make sure all related efforts are coordinated properly.\n\n## 7. Handle stale issues\n\nEvery issue has to die. If you're thinking about closing an issue you should probably close it. Also close issues where you have been waiting for an answer for more than 30 days. Be friendly while doing so. The user can always reopen it if needed. This will prevent your tracker from cluttering.\n\nUpdate 2020-06-29: This post originally included information about automating issue triage using GitMate.io. Please note that GitMate.io was deprecated in January 2019 and references to the project have therefore been removed.\n{: .alert .alert-info}\n\nPhoto by [Daniele Levis Pelusi](https://unsplash.com/photos/Pp9qkEV_xPk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/automation?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,232],{"slug":3861,"featured":6,"template":699},"triage-issues-gitmate","content:en-us:blog:triage-issues-gitmate.yml","Triage Issues Gitmate","en-us/blog/triage-issues-gitmate.yml","en-us/blog/triage-issues-gitmate",{"_path":3867,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3868,"content":3874,"config":3880,"_id":3882,"_type":13,"title":3883,"_source":15,"_file":3884,"_stem":3885,"_extension":18},"/en-us/blog/try-out-new-way-to-migrate-projects",{"title":3869,"description":3870,"ogTitle":3869,"ogDescription":3870,"noIndex":6,"ogImage":3871,"ogUrl":3872,"ogSiteName":685,"ogType":686,"canonicalUrls":3872,"schema":3873},"Moving projects easily: GitLab migration automation benefits","Learn how our new 
direct transfer feature, in beta, is speeding migrations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668857/Blog/Hero%20Images/migration.jpg","https://about.gitlab.com/blog/try-out-new-way-to-migrate-projects","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab project migration and automation - a perfect pair for faster, easier transfers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Magdalena Frankiewicz\"}],\n        \"datePublished\": \"2023-01-18\",\n      }",{"title":3875,"description":3870,"authors":3876,"heroImage":3871,"date":3877,"body":3878,"category":693,"tags":3879},"GitLab project migration and automation - a perfect pair for faster, easier transfers",[2257],"2023-01-18","\n\nSince Version 14.3, GitLab has supported [migrating GitLab groups by direct transfer](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended), where, rather than manually uploading export files, data is transferred directly from the source instance to the destination instance. 
We have been working to extend this functionality to projects and are including the ability to migrate projects by direct transfer as a beta in GitLab 15.8.\n\nThis beta feature is **available to everyone**, enabled by default on GitLab.com and with [some configuration](#availability-of-the-feature)\non self-managed GitLab instances.\n\n## Benefits of the direct transfer method\n\nMigrating by direct transfer enables you to easily migrate GitLab group and project resources between GitLab instances and within the same GitLab\ninstance, using either the UI or API.\n\nThis is a major improvement from migrating [groups](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-uploading-an-export-file-deprecated) and [projects using file exports](https://docs.gitlab.com/ee/user/project/settings/import_export.html) because:\n\n- You don't need to manually export each individual group and project to a file and then import all those export files to a new location. Now any top-level group you have the Owner role for (plus subgroups when using API) and all its projects can be migrated automatically, making your work more efficient.\n- When migrating from GitLab Self-Managed to GitLab.com, user associations (such as comment author) previously were linked to the user who ran the import. Migration using direct transfer maps users and their contributions correctly, provided [a few conditions are met](https://docs.gitlab.com/ee/user/group/import/#preparation).\n\n## Availability of the feature\n\nThe beta release for migrating GitLab projects with top-level groups by direct transfer is available on GitLab.com. You can migrate from a self-managed GitLab instance to GitLab.com or within GitLab.com right now!\n\nGitLab Self-Managed users have access to migrating projects by direct transfer beta, too. 
Administrators need to enable:\n\n- an [application setting](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#enable-migration-of-groups-and-projects-by-direct-transfer) for migrating groups\n~~- the `bulk_import_projects` [feature flag](https://docs.gitlab.com/ee/administration/feature_flags.html), for migrating projects in the groups~~\n\nWe have removed that feature flag in GitLab 15.10, so only the application setting needs to be enabled.\n\nThis change enables GitLab Dedicated instances to take advantage of the feature.\n\nWe recommend upgrading self-managed instances to the latest version possible before migrating groups and projects.\n\n## Trying the new feature out\n\nTo get started with the new feature, you can either [read the documentation](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended) or follow the\nsteps below.\n\n1. Make sure the [feature is available](#availability-of-the-feature) to you.\n1. Generate or copy a [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) with the `api` scope on your source GitLab instance. Both `api` and `read_repository` scopes are required when migrating from GitLab 15.0 and earlier.\n1. On the top navigation, select **+**, then **New group**, and then **Import group**.\n1. Enter the URL of your source GitLab instance.\n1. Enter the personal access token for your source GitLab instance and select **Connect instance**.\n  ![Screenshot of connecting the source instance](https://about.gitlab.com/images/blogimages/migrate-gitlab-projects-images/connect-source-instance.png){: .shadow}\n1. Select the groups to import from the top-level groups on the connected source instance you have the Owner role for. All the projects within chosen groups can be migrated too! Choose from the dropdown the group you want to migrate to for each group you have selected. 
Adjust the newly created group name, if needed.\n  ![Screenshot of choosing groups to import](https://about.gitlab.com/images/blogimages/migrate-gitlab-projects-images/choose-groups-to-import.png){: .shadow}\n1. Next to the groups you want to import, select **Import with projects**. The **Status** column shows the import status of each group. If you leave the page open, it updates in real time.\n1. After a group has been imported, select its GitLab path to open the imported group.\n\nFor more information about migrating by direct transfer (for example, what resources are migrated and [group import history](https://docs.gitlab.com/ee/user/group/import/index.html#group-import-history)), see our [documentation](https://docs.gitlab.com/ee/user/group/import/index.html).\n\n## What about migrating projects using file exports? \n\nOnce the migrating projects by direct transfer feature is ready for production use at any scale, migrating groups and projects using file exports\nwill be disabled by a feature flag and only migrating groups and projects by direct transfer will be available in the UI and API.\n\nBecause migrating by direct transfer requires network connection between instances or GitLab.com, customers that are using air-gapped networks with no\nnetwork connectivity between their GitLab instances will need to reenable migrating using file exports. They will be able to use migrating groups and\nprojects by direct transfer after we extend this solution to [also support offline instances](https://gitlab.com/groups/gitlab-org/-/epics/8985).\n\nWe will not fully remove migrating using file exports until we support all our customers with a new solution.\n\n## What's next for migrating by direct transfer method \n\nOf course, we're not done yet! We will be improving the direct transfer method before we come out of beta. 
We're working on:\n\n- Making the migration [efficient](https://gitlab.com/groups/gitlab-org/-/epics/8983) and [reliable](https://gitlab.com/groups/gitlab-org/-/epics/8927)\n  for large projects.\n- Improving [feedback during migration and when migration is finished](https://gitlab.com/groups/gitlab-org/-/epics/8984).\n\nNext, we will be focusing on:\n\n- Enabling more granular imports, where you'll be able to:\n  - Migrate any group in the UI, not only top-level ones. Migrating subgroups is currently limited to the API.\n  - Choose which projects within a group you want to migrate.\n- Importing [project relations not yet included in migration](https://gitlab.com/groups/gitlab-org/-/epics/9319).\n- Automatically [migrating users](https://gitlab.com/groups/gitlab-org/-/epics/4616).\n\nDetails about the migrating by direct transfer roadmap can be found on our [direction page](https://about.gitlab.com/direction/manage/import_and_integrate/importers/).\n\nWe are excited about this roadmap and hope you are too! We want to hear from you. What's the most important missing piece for you? What else can we improve? Let us know in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/284495) and we'll keep iterating!\n\n**Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\n_Cover photo by [Chris Briggs](https://unsplash.com/@cgbriggs19?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)_\n",[834,1014,767,9],{"slug":3881,"featured":6,"template":699},"try-out-new-way-to-migrate-projects","content:en-us:blog:try-out-new-way-to-migrate-projects.yml","Try Out New Way To Migrate Projects","en-us/blog/try-out-new-way-to-migrate-projects.yml","en-us/blog/try-out-new-way-to-migrate-projects",{"_path":3887,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3888,"content":3894,"config":3899,"_id":3901,"_type":13,"title":3902,"_source":15,"_file":3903,"_stem":3904,"_extension":18},"/en-us/blog/tutorial-secure-and-optimize-your-maven-repository-in-gitlab",{"title":3889,"description":3890,"ogTitle":3889,"ogDescription":3890,"noIndex":6,"ogImage":3891,"ogUrl":3892,"ogSiteName":685,"ogType":686,"canonicalUrls":3892,"schema":3893},"Tutorial: Secure and optimize your Maven Repository in GitLab","Learn the best practices, advanced techniques, and upcoming features that improve the efficiency of your DevSecOps workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666187/Blog/Hero%20Images/blog-image-template-1800x945__6_.png","https://about.gitlab.com/blog/tutorial-secure-and-optimize-your-maven-repository-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Secure and optimize your Maven Repository in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-05-22\",\n      }",{"title":3889,"description":3890,"authors":3895,"heroImage":3891,"date":3896,"body":3897,"category":787,"tags":3898},[761],"2025-05-22","As a GitLab product manager, I'm excited to share insights on securing and optimizing 
your Maven repository. We're passionate about providing a complete DevSecOps platform, and the Maven repository is part of this ecosystem. Explore best practices, advanced techniques, and upcoming features that will transform your Maven workflow.\n\n## Securing your Maven repository: A comprehensive approach\n\nSecuring your software supply chain is more critical than ever so let's dive into strategies to fortify your Maven packages in GitLab.\n\n### Implement strong authentication\n\n**Personal access tokens:** Use PATs for fine-grained access control.\n\nFor example:\n\n```bash\nmvn deploy -s settings.xml\n```\n\nWhere `settings.xml` contains:\n\n```xml\n\u003Csettings>\n  \u003Cservers>\n    \u003Cserver>\n      \u003Cid>gitlab-maven\u003C/id>\n      \u003Cconfiguration>\n        \u003ChttpHeaders>\n          \u003Cproperty>\n            \u003Cname>Private-Token\u003C/name>\n            \u003Cvalue>${env.GITLAB_PERSONAL_TOKEN}\u003C/value>\n          \u003C/property>\n        \u003C/httpHeaders>\n      \u003C/configuration>\n    \u003C/server>\n  \u003C/servers>\n\u003C/settings>\n```\n\n**Deploy tokens:** Ideal for CI/CD pipelines. 
Generate these in your GitLab project settings and use them in your `.gitlab-ci.yml`.\n\n```yaml\ndeploy:\n  script:\n    - 'mvn deploy -s ci_settings.xml'\n  variables:\n    MAVEN_CLI_OPTS: \"-s ci_settings.xml --batch-mode\"\n    MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  only:\n    - main\n```\n\nThe corresponding `ci_settings.xml` file:\n\n```xml\n\u003Csettings xmlns=\"http://maven.apache.org/SETTINGS/1.1.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n  xsi:schemaLocation=\"http://maven.apache.org/SETTINGS/1.1.0 http://maven.apache.org/xsd/settings-1.1.0.xsd\">\n  \u003Cservers>\n    \u003Cserver>\n      \u003Cid>gitlab-maven\u003C/id>\n      \u003Cconfiguration>\n        \u003ChttpHeaders>\n          \u003Cproperty>\n            \u003Cname>Deploy-Token\u003C/name>\n            \u003Cvalue>${env.CI_DEPLOY_PASSWORD}\u003C/value>\n          \u003C/property>\n        \u003C/httpHeaders>\n      \u003C/configuration>\n    \u003C/server>\n  \u003C/servers>\n\u003C/settings>\n```\n\nIn this setup:\n\n* The `CI_DEPLOY_PASSWORD` should be set as a CI/CD variable in your GitLab project settings containing the deploy token.\n* The `\u003Cid>` should match the repository ID in your project's `pom.xml` file.\n\n**Token rotation:** Implement a token rotation policy using GitLab's API. 
For example, you could create a scheduled pipeline that rotates tokens monthly:\n\n```yaml\nrotate_tokens:\n  script:\n    - curl --request POST \"https://gitlab.example.com/api/v4/projects/${CI_PROJECT_ID}/deploy_tokens\" --header \"PRIVATE-TOKEN: ${ADMIN_TOKEN}\" --form \"name=maven-deploy-${CI_PIPELINE_ID}\" --form \"scopes[]=read_registry\" --form \"scopes[]=write_registry\"\n  only:\n    - schedules\n```\n\n### Leverage GitLab's built-in security features\n\n**Dependency Scanning:** Enable it in your `.gitlab-ci.yml`.\n\n```yaml\ninclude:\n  - template: Security/Dependency-Scanning.gitlab-ci.yml\n\nvariables:\n  DS_JAVA_VERSION: 11\n```\n\n**Container Scanning:** If you're containerizing your Maven applications.\n\n```yaml\ninclude:\n  - template: Security/Container-Scanning.gitlab-ci.yml\n\nvariables:\n  CS_IMAGE: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA\n```\n\n**License Compliance:** Ensure all dependencies comply with your project's licensing requirements.\n\n```yaml\ninclude:\n  - template: Security/License-Scanning.gitlab-ci.yml\n```\n\n### Secure your CI/CD pipeline\n\n* **CI/CD variables:** Store sensitive information securely.\n\n  ```yaml\n  variables:\n    MAVEN_REPO_USER: ${CI_DEPLOY_USER}\n    MAVEN_REPO_PASS: ${CI_DEPLOY_PASSWORD}\n  ```\n* **Masked variables:** Prevent exposure in job logs. 
Set these in your GitLab CI/CD settings.\n* **Protected branches and tags:** Configure these in your GitLab project settings to control who can trigger package publishing.\n\n### Implement package signing\n\n* Use the Maven GPG plugin to sign your artifacts.\n\n  ```xml\n  \u003Cplugin>\n    \u003CgroupId>org.apache.maven.plugins\u003C/groupId>\n    \u003CartifactId>maven-gpg-plugin\u003C/artifactId>\n    \u003Cversion>1.6\u003C/version>\n    \u003Cexecutions>\n      \u003Cexecution>\n        \u003Cid>sign-artifacts\u003C/id>\n        \u003Cphase>verify\u003C/phase>\n        \u003Cgoals>\n          \u003Cgoal>sign\u003C/goal>\n        \u003C/goals>\n      \u003C/execution>\n    \u003C/executions>\n  \u003C/plugin>\n  ```\n\n* Store your GPG key securely using GitLab CI/CD variables.\n\n### Control package access\n\n* Use GitLab's project and group-level package registry settings to restrict access.\n* Implement IP allowlists for network-level access control in your GitLab instance settings.\n\n## Optimize performance: Streamline your Maven workflow\n\nEfficiency is crucial when working with large projects or numerous dependencies. 
Here are advanced techniques to optimize your Maven package usage in GitLab.\n\n### Utilize dependency management\n\n* Use the `\u003CdependencyManagement>` section in your parent POM.\n\n  ```xml\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>org.springframework.boot\u003C/groupId>\n        \u003CartifactId>spring-boot-dependencies\u003C/artifactId>\n        \u003Cversion>${spring-boot.version}\u003C/version>\n        \u003Ctype>pom\u003C/type>\n        \u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  ```\n### Leverage multi-module projects\n\n  * Structure your project with a parent POM and multiple modules:\n\n    ```\n    my-project/\n    ├── pom.xml\n    ├── module1/\n    │   └── pom.xml\n    ├── module2/\n    │   └── pom.xml\n    └── module3/\n        └── pom.xml\n    ```\n  * Use Maven's reactor to build modules in the optimal order:\n\n    ```bash\n    mvn clean install\n    ```\n\n### Implement parallel builds\n\n* Use Maven's parallel build feature:\n\n  ```bash\n  mvn -T 4C clean install\n  ```\n\n### Optimize for CI/CD\n\n* In `.gitlab-ci.yml`, use caching to speed up builds:\n\n  ```yaml\n  cache:\n    paths:\n      - .m2/repository\n\n  build:\n    script:\n      - mvn clean package -Dmaven.repo.local=$CI_PROJECT_DIR/.m2/repository\n  ```\n* Implement incremental builds:\n\n  ```yaml\n  build:\n    script:\n      - mvn clean install -Dmaven.repo.local=$CI_PROJECT_DIR/.m2/repository -am -amd -fae\n  ```\n\n### Utilize build caching\n\n* Use the Gradle Enterprise Maven Extension for build caching:\n\n  ```xml\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>com.gradle\u003C/groupId>\n        \u003CartifactId>gradle-enterprise-maven-plugin\u003C/artifactId>\n        \u003Cversion>1.9\u003C/version>\n        \u003Cconfiguration>\n          \u003CgradleEnterprise>\n            
\u003Cserver>https://ge.example.com\u003C/server>\n            \u003CallowUntrusted>false\u003C/allowUntrusted>\n          \u003C/gradleEnterprise>\n        \u003C/configuration>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  ```\n\n## Introducing the Maven Virtual Registry beta program\n\nI'm thrilled to announce the launch of our beta program for the upcoming Maven virtual registry feature. This addition to our package ecosystem will change how you manage Maven repositories in GitLab.\n\n### Key features of Maven Virtual Registry\n\n1. **Repository aggregation:** Combine multiple Maven repositories (both internal and external) into a single virtual repository.\n2. **Smart proxy and caching:** Improve build times by caching artifacts and intelligently routing requests.\n3. **Centralized Access Control:** Enhance security by managing access to all repositories from a single point.\n\n### How it works\n\n1. **Configuration:** Configure Maven authentication in your `settings.xml`:\n\n```\n\u003Csettings>\n  \u003Cservers>\n    \u003Cserver>\n      \u003Cid>gitlab-maven\u003C/id>\n      \u003Cconfiguration>\n        \u003ChttpHeaders>\n          \u003Cproperty>\n            \u003Cname>Private-Token\u003C/name>\n            \u003Cvalue>${env.GITLAB_TOKEN}\u003C/value>\n          \u003C/property>\n        \u003C/httpHeaders>\n      \u003C/configuration>\n    \u003C/server>\n  \u003C/servers>\n\u003C/settings>\n```\n\nAuthentication options:\n\n- Personal access token: Use `Private-Token` as the name and `${env.GITLAB_TOKEN}` as the value.\n\n-  Group deploy token: Use `Deploy-Token` as the name and `${env.GITLAB_DEPLOY_TOKEN}` as the value.\n\n- Group access token: Use `Private-Token` as the name and `${env.GITLAB_ACCESS_TOKEN}` as the value.\n\n- CI job token: Use `Job-Token` as the name and `${CI_JOB_TOKEN}` as the value.\n\n- Configure the virtual registry in your `pom.xml`.\n\nOption 1: As an additional registry:\n\n```\n\u003Crepositories>\n  
\u003Crepository>\n    \u003Cid>gitlab-maven\u003C/id>\n    \u003Curl>https://gitlab.example.com/api/v4/virtual_registries/packages/maven/\u003Cvirtual registry id>\u003C/url>\n  \u003C/repository>\n\u003C/repositories>\n```\n\nOption 2: As a replacement for Maven Central (in your `settings.xml`):\n\n```\n\u003Cmirrors>\n  \u003Cmirror>\n    \u003Cid>gitlab-maven\u003C/id>\n    \u003Cname>GitLab virtual registry for Maven Central\u003C/name>\n    \u003Curl>https://gitlab.example.com/api/v4/virtual_registries/packages/maven/\u003Cvirtual registry id>\u003C/url>\n    \u003CmirrorOf>central\u003C/mirrorOf>\n  \u003C/mirror>\n\u003C/mirrors>\n```\n\n2. **Usage:** Now all your Maven operations will use the virtual repository.\n\n```\n# For personal access tokens\nexport GITLAB_TOKEN=your_personal_access_token\n\n# For group deploy tokens\nexport GITLAB_DEPLOY_TOKEN=your_deploy_token\n\n# For group access tokens\nexport GITLAB_ACCESS_TOKEN=your_access_token\n\n# Then run Maven commands normally\nmvn package\n\n```\n\n3. Benefits\n\n- Simplified dependency management\n- Improved build times\n- Enhanced security and compliance\n- Better control over third-party dependencies\n\n### Join the beta program\n\nWe're actively seeking participants for our beta program. As a beta tester, you'll have the opportunity to:\n\n* Get early access to the Maven Virtual Registry feature.\n* Provide direct feedback to our development team.\n* Shape the future of Maven package management in GitLab.\n* Participate in exclusive webinars and Q&A sessions with our product team.\n\n> To join the beta program or learn more about the Maven Virtual Registry, please visit the [GitLab Maven Virtual Registry Beta Program](https://gitlab.com/gitlab-org/gitlab/-/issues/498139) (**Note:** This is a placeholder link).\n\n## Summary\n\nAt GitLab, we're committed to providing cutting-edge tools for secure, efficient, and scalable software development. 
The Maven Virtual Registry is just one example of how we're continuously innovating to meet the evolving needs of developers and platform engineers.\n\nImplementing the security measures and optimization techniques discussed in this post and leveraging upcoming features like the Maven Virtual Registry can improve your Maven workflow within GitLab.\n\nWe're excited about the future of package management in GitLab and can't wait to see how you'll use these features to take your development process to the next level. Stay tuned for more updates and happy coding!",[787,495,834,767,9],{"slug":3900,"featured":90,"template":699},"tutorial-secure-and-optimize-your-maven-repository-in-gitlab","content:en-us:blog:tutorial-secure-and-optimize-your-maven-repository-in-gitlab.yml","Tutorial Secure And Optimize Your Maven Repository In Gitlab","en-us/blog/tutorial-secure-and-optimize-your-maven-repository-in-gitlab.yml","en-us/blog/tutorial-secure-and-optimize-your-maven-repository-in-gitlab",{"_path":3906,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3907,"content":3912,"config":3919,"_id":3921,"_type":13,"title":3922,"_source":15,"_file":3923,"_stem":3924,"_extension":18},"/en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"title":3908,"description":3909,"ogTitle":3908,"ogDescription":3909,"noIndex":6,"ogImage":2716,"ogUrl":3910,"ogSiteName":685,"ogType":686,"canonicalUrls":3910,"schema":3911},"Tutorial: Secure BigQuery data publishing with GitLab ","Learn how to create repeatable, auditable, and efficient processes for automating and securing BigQuery data exports.","https://about.gitlab.com/blog/tutorial-secure-bigquery-data-publishing-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Secure BigQuery data publishing with GitLab \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": 
\"2025-03-25\",\n      }",{"title":3908,"description":3909,"authors":3913,"heroImage":2716,"date":3915,"body":3916,"category":832,"tags":3917},[3914],"Regnard Raquedan","2025-03-25","GitLab offers a powerful solution for automating and securing\n[BigQuery](https://cloud.google.com/bigquery) data exports. This integration\ntransforms manual exports into repeatable, auditable processes that can\neliminate security vulnerabilities while saving valuable time. This tutorial\nexplains how to implement this solution so you can quickly reduce manual\noperations, permission issues, and security concerns with just a few lines\nof GitLab YAML code.\n\n\nFollow along with this step-by-step video:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gxXX-ItAreo?si=FijY9wMVppCW-18q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## The solution architecture\n\n\nOur solution leverages GitLab CI/CD pipelines to automate the secure export\nof data from BigQuery to Google Cloud Storage. Here's the high-level\narchitecture:\n\n\n1. SQL code is stored and version-controlled in GitLab.  \n\n2. After code review and approval, GitLab CI/CD pipeline executes the\ncode.  \n\n3. The pipeline authenticates with Google Cloud.  \n\n4. SQL queries are executed against BigQuery.  \n\n5. Results are exported as CSV files to Google Cloud Storage.  \n\n6. Secure links to these files are provided for authorized consumption.\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* **Google Cloud APIs enabled:** BigQuery API and Cloud Storage API  \n\n* **Service account** with appropriate permissions:  \n  * BigQuery Job User  \n  * Storage Admin  \n  * **Note:** For this demo, we're using the service account approach for authentication, which is simpler to set up. 
For production environments, you might consider using GitLab's identity and access management integration with Google Cloud. This integration leverages Workload Identity Federation, which provides enhanced security and is more suitable for enterprise customers and organizations.  \n* **GitLab project** ready to store your SQL code and pipeline configuration\n\n\n## Step-by-step implementation\n\n\n**1. Configure Google Cloud credentials.**\n\n\nFirst, set up the necessary environment variables in your GitLab project:\n\n\n- Go to your **GitLab project > Settings > CI/CD**.  \n\n- Expand the **Variables** section.  \n\n- Add the following variables:  \n   * `GCS_BUCKET`: Your Google Cloud Storage bucket name  \n   * `GCP_PROJECT_ID`: Your Google Cloud project ID  \n   * `GCP_SA_KEY`: Base64-encoded service account key (mark as masked)\n\n**2. Create your SQL query.**\n\n\nCreate a file named `query.sql` in your GitLab repository with your BigQuery\nSQL query. The query looks like this:\n\n\n```\n\n-- This query shows a list of the daily top Google Search terms.\n\nSELECT\n   refresh_date AS Day,\n   term AS Top_Term,\n       -- These search terms are in the top 25 in the US each day.\n   rank,\nFROM `bigquery-public-data.google_trends.top_terms`\n\nWHERE\n   rank = 1\n       -- Choose only the top term each day.\n   AND refresh_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 2 WEEK)\n       -- Filter to the last 2 weeks.\nGROUP BY Day, Top_Term, rank\n\nORDER BY Day DESC\n   -- Show the days in reverse chronological order.\n\n```\n\n\nThis query gets the top 25 search terms from Google Trends for the current\nday.\n\n\n**3. 
Configure the GitLab CI/CD pipeline.**\n\n\nCreate a `.gitlab-ci.yml` file in your repository root:\n\n\n```\n\nimage: google/cloud-sdk:alpine\n\n\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n\nexecute:\n  stage: deploy\n  script: \n    # Set up Google Cloud authentication and install necessary components\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $SERVICE_ACCOUNT_KEY | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud components install gsutil\n    # Set the active Google Cloud project\n    - gcloud config set project $PROJECT_ID\n    # Run the BigQuery query and export the results to a CSV file\n    - bq query --format=csv --use_legacy_sql=false \u003C test.sql > results.csv\n    # Create a Google Cloud Storage bucket if it doesn't exist\n    - gsutil ls gs://${CLOUD_STORAGE_BUCKET} || gsutil mb gs://${CLOUD_STORAGE_BUCKET}\n    # Upload the CSV file to the storage bucket\n    - gsutil cp results.csv gs://${CLOUD_STORAGE_BUCKET}/results.csv\n    # Set the access control list (ACL) to make the CSV file publicly readable\n    - gsutil acl ch -u AllUsers:R gs://${CLOUD_STORAGE_BUCKET}/results.csv\n    # Define the static URL for the CSV file\n    - export STATIC_URL=\"https://storage.googleapis.com/${CLOUD_STORAGE_BUCKET}/results.csv\"\n    # Display the static URL for the CSV file\n    - echo \"File URL = $STATIC_URL\"\n\n```\n\n\n**4. 
Run the pipeline.**\n\n\nNow, whenever changes are merged to your main branch, the pipeline will\nprovide a link to the CSV file stored on the Google Cloud Storage bucket.\nThis file contains the result of the executed SQL query that GitLab subjects\nto security checks.\n\n\n## Benefits of this approach\n\n\n* **Security:** Authentication is handled automatically via service accounts\n(or Workload Identity Federation for enhanced security in production\nenvironments).  \n\n* **Auditability:** All data exports are tracked through GitLab commits and\npipeline logs.  \n\n* **Repeatability:** Consistent, predictable export process on every run,\nand can be scheduled.  \n\n* **Version control:** SQL queries are properly versioned and reviewed.  \n\n* **Automation:** Significantly fewer manual exports, reducing human error.\n\n\n## Try it today\n\n\nBy combining GitLab's DevSecOps capabilities with Google Cloud's BigQuery\nand Cloud Storage, you've now automated and secured your data publishing\nworkflow. 
This approach reduces manual operations, resolves permission\nheadaches, and addresses security concerns – all achieved with just a few\nlines of GitLab CI code.\n\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/big-query-data-publishing)\nto get started now.\n",[1159,495,1035,9,232,3918],"google",{"slug":3920,"featured":90,"template":699},"tutorial-secure-bigquery-data-publishing-with-gitlab","content:en-us:blog:tutorial-secure-bigquery-data-publishing-with-gitlab.yml","Tutorial Secure Bigquery Data Publishing With Gitlab","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab.yml","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"_path":3926,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3927,"content":3933,"config":3938,"_id":3940,"_type":13,"title":3941,"_source":15,"_file":3942,"_stem":3943,"_extension":18},"/en-us/blog/unveiling-a-new-epic-experience-for-improved-agile-planning",{"title":3928,"description":3929,"ogTitle":3928,"ogDescription":3929,"noIndex":6,"ogImage":3930,"ogUrl":3931,"ogSiteName":685,"ogType":686,"canonicalUrls":3931,"schema":3932},"Unveiling a new epic experience for improved Agile planning","Explore the update for GitLab epics that enhances planning and improves workflows – all with seamless migration for better project management.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660011/Blog/Hero%20Images/blog-image-template-1800x945__21_.png","https://about.gitlab.com/blog/unveiling-a-new-epic-experience-for-improved-agile-planning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Unveiling a new epic experience for improved Agile planning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2024-07-03\",\n      
}",{"title":3928,"description":3929,"authors":3934,"heroImage":3930,"date":3935,"body":3936,"category":1506,"tags":3937},[1880],"2024-07-03","In our ongoing journey to enhance the Agile planning experience in GitLab, we [recently unveiled a new look](https://about.gitlab.com/blog/first-look-the-new-agile-planning-experience-in-gitlab/). This update marks a significant step toward creating a unified and flexible planning tool tailored to your needs. This article explores a crucial part of that initiative: the new epic experience. You'll learn about upcoming epic features and the motivations behind these changes, which are designed to elevate your project management capabilities.\n\n## Why the new epic experience?\n\n### Addressing user feedback\nAs part of our mission to provide a comprehensive Agile planning experience, we've listened closely to your feedback. Users have highlighted challenges with the current epic implementation, such as inconsistent features between epics and issues and a lack of flexibility to support diverse workflows. Some pain points focused on workflow tools, including the absence of assignees on epics and a lack of reusable templates. The new epic experience addresses these pain points and makes Agile planning more intuitive and efficient.\n\n### Unified Work Items framework\nTo tackle these issues, we've introduced a unified Work Items framework. This new architecture ensures consistency across all planning objects — epics, issues, and tasks — simplifying the user experience and enhancing functionality. By consolidating the underlying code, we can deliver new features and improvements faster, ensuring a smoother and more reliable planning process.\n\n> Read more about [what is to come with GitLab Agile planning](https://about.gitlab.com/blog/first-look-the-new-agile-planning-experience-in-gitlab/).\n\n## Key features of the new epic experience\n\n### Enhanced detail page\nOne of the most notable changes is the revamped epic detail page. 
The new design offers a cleaner, more intuitive interface, making it easier to manage and track your epics.\n\nHere are some new key features:\n* **Assignees** - assign epics to team members, improving accountability and oversight.\n* **Health status** - quickly gauge the status of your epics with new health indicators.\n* **Time tracking** - create better visibility over time spent and ensure efficient use of resources across your projects.\n* **Ancestry** - view the entire hierarchy lineage of the epic.\n* **Condensed description** - easily view long work item descriptions without having to scroll excessively. Descriptions are truncated by default, with a \"Show more\" link to expand the full text on demand. This streamlines your workflow by allowing you to quickly scan descriptions and only expand them when needed, reducing clutter and improving readability.\n* **Custom color** - customize the color related to epics viewed on the roadmap now with the ability to define a custom color, use HEX or RGB codes, or choose from an expanded predefined palette. \n\n![new epic experience screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674437/Blog/Content%20Images/Screenshot_2024-07-10_at_4.22.45_p.m..png)\n\n### Consistency across planning objects\nThe new epic experience aligns closely with the new issues experience coming soon (spoiler alert!) and tasks, providing a seamless and cohesive user experience. This consistency helps streamline workflows and reduces the learning curve for new users.\n\n### Additional functionality\nWe plan to iteratively add exciting new features that will enhance your planning capabilities. Our goal is to allow you to tailor planning processes within GitLab to best fit your organization’s unique needs. Once we’ve released the new epics experience, you can expect to see additional functionality with every release! 
There are many great features to come – here are some of my favorites:\n- [Templates](https://gitlab.com/gitlab-org/gitlab/-/issues/428690)\n- [Custom fields](https://gitlab.com/groups/gitlab-org/-/epics/235)\n- [Configurable statuses](https://gitlab.com/groups/gitlab-org/-/epics/5099)\n- [Project-level epics](https://gitlab.com/gitlab-org/gitlab/-/issues/31840)\n- [Cloning](https://gitlab.com/gitlab-org/gitlab/-/issues/339768)\n- [Moving to another group/project](https://gitlab.com/gitlab-org/gitlab/-/issues/339766)\n- [Milestones](https://gitlab.com/groups/gitlab-org/-/epics/329)\n\n## Migration expectations\nWe understand that any change can be disruptive, so we've designed the migration to the new epic experience to be as seamless as possible. All existing epic data, APIs, and URLs will continue to function as expected. Users do not need to take any action to prepare for this transition. For our self-managed customers, learn how you can preview the new experience in a test environment ahead of general availability [here](https://docs.gitlab.com/ee/user/group/epics/epic_work_items.html).\n\n## Community feedback and engagement\nWe value your input and encourage you to share your experiences with the new epic experience. Your feedback is essential to help refine and improve our tools. Please visit our [epic experience feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/494462) to provide your thoughts and suggestions.\n\n## What's next\nThe new epic experience in GitLab represents a significant leap forward in our Agile planning capabilities. With enhanced features, improved consistency, and a user-centric approach, we are confident that these changes will greatly benefit your project management processes. 
We invite you to explore the new features, provide feedback, and stay tuned for more updates as we continue to innovate and improve.\n\n> [Bookmark this page](https://about.gitlab.com/blog/categories/agile-planning/) to keep up with our Agile planning news.",[744,495,834,767,9],{"slug":3939,"featured":6,"template":699},"unveiling-a-new-epic-experience-for-improved-agile-planning","content:en-us:blog:unveiling-a-new-epic-experience-for-improved-agile-planning.yml","Unveiling A New Epic Experience For Improved Agile Planning","en-us/blog/unveiling-a-new-epic-experience-for-improved-agile-planning.yml","en-us/blog/unveiling-a-new-epic-experience-for-improved-agile-planning",{"_path":3945,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3946,"content":3952,"config":3958,"_id":3960,"_type":13,"title":3961,"_source":15,"_file":3962,"_stem":3963,"_extension":18},"/en-us/blog/use-cases-for-epics",{"title":3947,"description":3948,"ogTitle":3947,"ogDescription":3948,"noIndex":6,"ogImage":3949,"ogUrl":3950,"ogSiteName":685,"ogType":686,"canonicalUrls":3950,"schema":3951},"How the GitLab UX team uses epics","UX Manager Sarrah Vesselov shares how the UX team is using epics to manage their workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680187/Blog/Hero%20Images/how-ux-team-uses-epics.jpg","https://about.gitlab.com/blog/use-cases-for-epics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How the GitLab UX team uses epics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarrah Vesselov\"}],\n        \"datePublished\": \"2018-03-19\",\n      }",{"title":3947,"description":3948,"authors":3953,"heroImage":3949,"date":3955,"body":3956,"category":832,"tags":3957},[3954],"Sarrah Vesselov","2018-03-19","\n\nOne of the challenges for UX here at GitLab is how to work iteratively, making the smallest changes possible, while maintaining a holistic view of the 
application. As the manager for the UX department, I was curious to see how we could use [epics](https://docs.gitlab.com/ee/user/group/epics/) to better plan and track UX efforts over time.\n\n\u003C!-- more -->\n\n## What are epics?\n\nThe term 'epic' is most commonly associated with Agile methodology. In Agile, an epic is a collection of user stories that describe a larger user flow, typically consisting of multiple features. So, what does ‘epic’ mean at GitLab? Here, epics contain a title and description, much like an issue, and allow you to attach multiple child issues to indicate hierarchy. In short, an epic is a feature that allows you to manage a portfolio of projects more efficiently and with less effort by tracking groups of issues that share a theme, across projects and milestones.\n\nWhat this meant for the UX team was that we finally had an efficient way to plan, track, and execute a group of thematically related issues. Take the merge request page for example. We have over 100 issues related to UX improvements for this feature alone! Each issue, taken on its own, represents just one piece of a much bigger picture. Epics would allow us to define the goal we have for the entire page and organize issues specific to that effort.\n\n## Getting started with epics\n\nTo get started with epics, we put together a UX strategy template. This template would be filled out and added to the epic description. The template defined the following:\n\n- **Challenges:** What user problem are we trying to solve? What business problem are we trying to solve? 
Are there obstacles standing in the way?\n\n- **Vision:** What do we want to achieve?\n\n- **Focus Areas:** What will we focus our attention on to have the most impact?\n\n- **Mission:** How will we achieve this goal?\n\n- **Activity/Deliverables:** What will we do and what will we deliver?\n\n- **Measure:** How will we measure success qualitatively and quantitatively?\n\nThe template also includes links to any relevant [personas](/blog/discovering-gitlabs-personas/) and [research](/blog/conducting-remote-ux-research/) we should consider when working toward the overall goal.\n\n## Creating our first epic\n\nWith the template ready to go, we chose the merge request page as our first area of focus. We started by reviewing the existing UX research for this page. It was essential to use data to understand the pain points and opportunities. We also examined the entire backlog of issues related to this page, matching existing issues to the research findings. With the significant pain points identified, we were able to fill out the template and create our very first epic.\n\n![Merge Request Epic](https://about.gitlab.com/images/blogimages/epics-ux.png){: .shadow}\n\nWith a holistic view of what we wanted to achieve, we could go back and find issues in the backlog that were critical to the vision. These issues were added to the epic and ordered according to priority. As we discover new information, we can reorder these issues to match the change in priority. As the scope expands, we can aggressively break things out into new epics for development at a later time or parallel to the existing epic. In the future, [sub-epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/4282) will make this process even more fluid.\n\n![Merge Request Epic Issues](https://about.gitlab.com/images/blogimages/epic-ux-issues.png){: .shadow}\n\n*\u003Csmall>Issues are listed under the epic description. 
They can be easily reordered by dragging and dropping them into place.\u003C/small>*\n\nWe also set a time frame for this overall effort to be achieved. Having a set timeframe allows us to resource plan with the product team and make adjustments accordingly.\n\n## Looking ahead\n\nSo far, epics have proven to be well suited for planning long-term UX efforts. It has allowed us to maintain a holistic view of product area while still working iteratively. Epics also give other departments better visibility into what UX considers important. We are already looking beyond the merge request page and using epics to plan other efforts spanning multiple milestones. Epics are still relatively new, and there are many additions yet to come. In future releases, they will support [labeling](https://gitlab.com/gitlab-org/gitlab-ee/issues/4032), [discussions](https://gitlab.com/gitlab-org/gitlab-ee/issues/3889), [project-level epics](https://gitlab.com/gitlab-org/gitlab-ee/issues/4019), and integration with [issues](https://gitlab.com/gitlab-org/gitlab-ee/issues/4684) and [roadmaps](https://gitlab.com/gitlab-org/gitlab-ee/issues/3559).\n\n![Roadmap feature for epics](https://about.gitlab.com/images/blogimages/roadmaps.png){: .shadow}\n\nThe [Roadmap feature](https://gitlab.com/gitlab-org/gitlab-ee/issues/3559), pictured above, is set to be released in 10.5. Roadmaps offer a graphical, high level overview of an epic, or multiple epic's, goals and deliverables presented on a timeline. 
The blue roadmap bar and the epic list item are clickable and will navigate to that epic's detail page.\n\n## Resources\n- [Portfolio Management Roadmap](/direction/#portfolio-management-and-issue-management )\n\nPhoto by [Dmitri Popov](https://unsplash.com/) on [Unsplash](https://unsplash.com/search/photos/scale?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[744,9,696,791],{"slug":3959,"featured":6,"template":699},"use-cases-for-epics","content:en-us:blog:use-cases-for-epics.yml","Use Cases For Epics","en-us/blog/use-cases-for-epics.yml","en-us/blog/use-cases-for-epics",{"_path":3965,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3966,"content":3972,"config":3978,"_id":3980,"_type":13,"title":3981,"_source":15,"_file":3982,"_stem":3983,"_extension":18},"/en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"title":3967,"description":3968,"ogTitle":3967,"ogDescription":3968,"noIndex":6,"ogImage":3969,"ogUrl":3970,"ogSiteName":685,"ogType":686,"canonicalUrls":3970,"schema":3971},"Use GitLab AI features out-of-the-box in a GitLab Workspace","GitLab Workspaces now ships with the GitLab workflow extension preinstalled, providing access to powerful AI features like GitLab Duo Chat and Code Suggestions for increased productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098843/Blog/Hero%20Images/Blog/Hero%20Images/securitylifecycle-light_securitylifecycle-light.png_1750098843047.png","https://about.gitlab.com/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab AI features out-of-the-box in a GitLab Workspace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Safwan Ahmed\"}],\n        \"datePublished\": \"2024-07-24\",\n      
}",{"title":3967,"description":3968,"authors":3973,"heroImage":3969,"date":3975,"body":3976,"category":764,"tags":3977},[3974],"Safwan Ahmed","2024-07-24","AI is transforming the way we get work done, from helping automate mundane tasks to optimizing aspects of our day-to-day workflow. Of particular relevance has been [generative AI’s ability](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) to support developers in getting the job done, from code snippet suggestions to concise summaries of technical questions. These AI tools have been embedded in the development lifecycle through integrations with existing software like code editors and CI/CD platforms. Thanks to these integrations, particularly in the case of code editors, developers can have an AI assistant that complements their skills within their development environment.\n\nWhile these AI tools can help boost productivity, setting them up in an existing development environment may not be preferable. For example, you may not want to install a new dependency on your local workstation that could affect your setup, you may have security or privacy concerns about running AI tools on your computer, or you may find it hard to give the tooling context on your existing workflow. GitLab resolves these issues by providing a suite of tools that allow you to leverage the power of AI in [a remote development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) right out of the box. In this blog, you'll learn about the GitLab features that make this possible and how to set up your [workspace environment](https://docs.gitlab.com/ee/user/workspace/) to get started.\n\n## GitLab workflow extension for VS Code\n\nGitLab workflow extension for VS Code integrates GitLab into the VS Code editor. It brings into scope key elements of your GitLab workflow such as issues, merge requests, and pipeline status. 
For more information, visit [the GitLab workflow extension documentation](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/).\n\n## GitLab Workspaces\n\nGitLab Workspaces provide an isolated development environment to make changes to your GitLab projects. Workspaces offer a platform to work on your projects without the complexity of setting up local dependencies. Workspaces also provide reproducible development setups, as a workspace environment configuration created by one developer can be shared with others. GitLab Workspaces are configured to use the VS Code editor and ship with the workflow extension preinstalled. To learn more, visit the [GitLab Workspaces documentation](https://docs.gitlab.com/ee/user/workspace/).\n\n## GitLab Duo Chat and Code Suggestions\n\n[GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available) and [GitLab Duo Code Suggestions](https://about.gitlab.com/blog/gitlab-duo-code-suggestions-is-generally-available/) are part of the GitLab Duo suite of AI features enhancing developer productivity. Chat and Code Suggestions are integrated into the workflow extension and are GitLab context-aware. This allows you to ask GitLab Duo questions about items like issues and merge requests and to automatically have access to code suggestions and code completion. This integration requires [a GitLab Duo license](https://about.gitlab.com/gitlab-duo/). See the [GitLab Duo Chat documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/) and [GitLab Duo Code Suggestions documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/) for more information.\n\n## How to set up the Workflow extension, Workspaces, and GitLab Duo to work together\n\nWhile these features are impressive on their own, when combined they deliver on the promise of an easy-to-spin-up, isolated, AI-driven development environment. 
Here are the steps to get this powerhouse up and running.\n\n## Create a workspace\n\nFollow this [comprehensive but easy-to-follow tutorial](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) to create a remote development workspace.\n\n## Validate GitLab Workflow is active\n\nAfter your workspace is up and running, you should see a GitLab icon on the side of your editor like the following:\n\n![Arrow pointing to GitLab tanuki icon](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098853108.png)\n\nYou can then use the workflow extension to bring up merge requests assigned to you in the current project in GitLab. To do this, access the command palette by hitting `command + shift + P` and entering `GitLab: Show Merge Requests Assigned to Me`. This will redirect you to GitLab and show your assigned MRs.\n\n![Arrow pointing to 'GitLab: Show Merge Requests Assigned to Me'](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098853109.png)\n\nFor more tips and tricks, read [Visual Studio code editor: Eight tips for using GitLab VS Code](https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab/).\n\n## Use GitLab Duo Chat\n\nYou should also see a second, smaller GitLab icon on your sidebar. This gives you access to GitLab Duo Chat. Feel free to ask it a question.\n\n![Arrow pointing to GitLab tanuki icon with sparkles around it](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098853111.png)\n\n## Use GitLab Duo Code Suggestions\n\nOpen up any source file in your directory. You can begin typing code and have predictive suggestions, powered by GitLab Duo Code Suggestions, pop up –  you can insert them by hitting the tab key. 
The example below shows my attempt to write a string processing function. Code Suggestions has inferred I would want to split the passed string into spaces, which is indeed my intention.\n\n![Code Suggestions suggesting that the passed string into spaces](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098853112.png)\n\nSuppose I have completed my string processing function above and would like to generate unit tests for it but want to avoid the chore of writing boilerplate code. You can provide a comment in your editor and have Code Suggestions generate code for you like the following:\n\n![Shows boilerplate code for generating unit tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098853/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098853115.png)\n\nCode Suggestions implements a whole unit test for my function, covering happy and sad paths.\n\nFor more exciting uses of the GitLab Duo suite, check out these articles:\n* [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n* [Top tips for efficient AI-powered Code Suggestions with GitLab Duo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n* [\"Developing GitLab Duo\" blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n\n# Next steps\n\nGitLab Workspaces is coming up with more exciting integrations and features that will enhance your remote development experience, be sure to check out the [category epic](https://gitlab.com/groups/gitlab-org/-/epics/7419) to know what’s coming next!\n\n> Sign up for [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/) 
today!\n",[766,9,232,834],{"slug":3979,"featured":90,"template":699},"use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace","content:en-us:blog:use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","Use Gitlab Ai Features Out Of The Box In A Gitlab Workspace","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace.yml","en-us/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace",{"_path":3985,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":3986,"content":3991,"config":3996,"_id":3998,"_type":13,"title":3999,"_source":15,"_file":4000,"_stem":4001,"_extension":18},"/en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"title":3987,"description":3988,"ogTitle":3987,"ogDescription":3988,"noIndex":6,"ogImage":1293,"ogUrl":3989,"ogSiteName":685,"ogType":686,"canonicalUrls":3989,"schema":3990},"How to make small changes using GitLab’s Web IDE","A quick three minute demo shows how teams can deliver better apps faster using GitLab CI/CD.","https://about.gitlab.com/blog/using-gitlab-web-ide-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make small changes using GitLab’s Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-05-28\",\n      }",{"title":3987,"description":3988,"authors":3992,"heroImage":1293,"date":3993,"body":3994,"category":718,"tags":3995},[1113],"2020-05-28","\n\nIt’s not enough to say something is quick and easy. To have a better understanding of some of the benefits of using [GitLab CI/CD](/topics/ci-cd/), it’s much better to _show_ you.\n\nIn a [short video](https://www.youtube.com/watch?v=6207TKNGgJs&feature=emb_logo), [Itzik Gan-Baruch](/company/team/#iganbaruch) technical marketing manager, demonstrates how to submit a code change using GitLab Web IDE. 
In three minutes, teams can submit a code change and commit it, trigger a CI pipeline to scan for any errors, and ship the updated application to users.\n\n## Getting started with GitLab Web IDE\n\nAll code that gets automatically tested and deployed to production has a human at its source. In GitLab 10.7, we released the [first iteration of our Web Integrated Development Environment (IDE)](/blog/introducing-gitlab-s-integrated-development-environment/) after observing how non-developers struggled with editing multiple files and committing those changes. Since we believe that [everyone can contribute](/company/mission/#mission), building an editor that was integrated with GitLab that made it easier for anyone to contribute seemed like a natural fit. To access the Web IDE, just click the button from any GitLab project.\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_1.png){: .shadow.medium.center}\n\nThe Web IDE button\n{: .note.text-center}\n\nIn this simple project with a job application, you can use the Web IDE to make a code change and push it to a feature branch. Select the file you would like to change from the menu on the left.\n\n![Selecting a file](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_2.png){: .shadow.medium.center}\n\nSelecting a file from the Web IDE\n{: .note.text-center}\n\nOnce you’ve modified the text in that file, add a commit message and create a new branch. 
Click `Commit` to create a merge request.\n\n![Commit](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_3.png){: .shadow.medium.center}\n\nCommit to create a merge request\n{: .note.text-center}\n\nYour commit generates a merge request, and from here you can add an assignee, tie this code change to a specific milestone, add labels, or add any additional information regarding the change.\n\n![Modify merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_4.png){: .shadow.medium.center}\n\nSubmit merge request\n{: .note.text-center}\n\nA new [continuous integration pipeline](/solutions/continuous-integration/) is triggered automatically. Click on the pipeline to see the stages.\n\n![Pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_5.png){: .shadow.medium.center}\n\nClick on the pipeline from the merge request\n{: .note.text-center}\n\nIn this project, the pipeline needed zero-configuration because it was generated through GitLab's [Auto DevOps](/direction/delivery/auto_devops/) capability. The pipeline has stages and a few jobs within each stage.\n\n![Auto DevOps pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_6.png){: .shadow.medium.center}\n\nA CI pipeline automatically configured with GitLab Auto DevOps\n{: .note.text-center}\n\nFirst, it builds a Docker image for the code and pushes it to the container registry. 
From there, it begins tests and scans jobs that run in parallel to help speed up the pipeline.\n\n![Pipeline jobs](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_7.png){: .shadow.medium.center}\n\nClick on a job within the pipeline stage to get more information\n{: .note.text-center}\n\nBy clicking on a job within the stage, you can see what happens.\n\n![dependency scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_8.png){: .shadow.medium.center}\n\nDependency scanning details\n{: .note.text-center}\n\nOnce all tests are completed, all test results will be added to the merge request that was created. The merge request is really the key to using GitLab as a code collaboration and [version control platform](/topics/version-control/). It’s simply a request to merge one branch into another.\n\n![merge requests](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_9.png){: .shadow.medium.center}\n\nMerge requests for this project\n{: .note.text-center}\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) are a way to visualize the changes that were made. Click `View app` once the pipeline has completed to access the staging environment.\n\n![Review apps](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_10.png){: .shadow.medium.center}\n\nSelect `View app` to access a staging environment once a pipeline completes.\n{: .note.text-center}\n\nIn this environment, only changes that were made in the merge request will be displayed. This link can be sent to others so they can view the changes from a web browser.\n\n![staging environment](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_12.png){: .shadow.medium.center}\n\nThe Review App for this project\n{: .note.text-center}\n\nFrom the merge request, you can see the test results, including changes to code quality and the security scans. This scan detected 20 new vulnerabilities. 
If you’d like more information, just click `Expand` on the right.\n\n![pipeline test results](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_13.png){: .shadow.medium.center}\n\nPipeline test results\n{: .note.text-center}\n\nOnce the results have been expanded, you can click on each one to get more details.\n\n![SAST scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_14.png){: .shadow.medium.center}\n\nSAST vulnerabilities detected\n{: .note.text-center}\n\nBy clicking on one of these results, you can see the file that caused the vulnerability as well as the problematic lines of code.\n\n![security report](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_15.png){: .shadow.medium.center}\n\nSecurity report\n{: .note.text-center}\n\nFrom this menu, you can choose to dismiss the vulnerability or create an issue so that someone can fix it. Details from the test will be added to the issue automatically.\n\n![new issue](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_16.png){: .shadow.medium.center}\n\nA new issue created to investigate a vulnerability\n{: .note.text-center}\n\nFrom your original merge request, you can collaborate with others and have them take a look at the proposed changes.\n\n![collaborate on merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_17.png){: .shadow.medium.center}\n\nTag someone in a merge request to have them see your changes\n{: .note.text-center}\n\nOnce you’ve gathered feedback and all pipelines have passed, click the `merge` button to trigger a new pipeline to deploy your application to production\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_18.png){: .shadow.medium.center}\n\nClick `merge` to trigger a deployment pipeline\n{: .note.text-center}\n\nThis workflow shows how anyone can contribute code without using a command line. 
The Web IDE makes it easy for anyone to make changes without introducing additional risks or quality issues, all from the GitLab interface.\n\nTo see this three-minute demo in real-time, just watch the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/6207TKNGgJs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[108,9,1035],{"slug":3997,"featured":6,"template":699},"using-gitlab-web-ide-gitlab-ci-cd","content:en-us:blog:using-gitlab-web-ide-gitlab-ci-cd.yml","Using Gitlab Web Ide Gitlab Ci Cd","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd.yml","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"_path":4003,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4004,"content":4009,"config":4015,"_id":4017,"_type":13,"title":4018,"_source":15,"_file":4019,"_stem":4020,"_extension":18},"/en-us/blog/using-run-parallel-jobs",{"title":4005,"description":4006,"ogTitle":4005,"ogDescription":4006,"noIndex":6,"ogImage":2486,"ogUrl":4007,"ogSiteName":685,"ogType":686,"canonicalUrls":4007,"schema":4008},"How we used parallel CI/CD jobs to increase our productivity","GitLab uses parallel jobs to help long-running jobs run faster.","https://about.gitlab.com/blog/using-run-parallel-jobs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we used parallel CI/CD jobs to increase our productivity\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Miguel Rincon\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":4005,"description":4006,"authors":4010,"heroImage":2486,"date":2277,"body":4012,"category":832,"tags":4013},[4011],"Miguel Rincon","At GitLab, we must verify simultaneous changes from the hundreds of people\nthat contribute to GitLab each day. 
How can we help them contribute\nefficiently using our pipelines?\n\n\nThe pipelines that we use to build and verify GitLab have more than 90 jobs.\nNot all of those jobs are equal. Some are simple tasks that take a few\nseconds to finish, while others are long-running processes that must be\noptimized carefully.\n\n\nAt the time of this writing, we have more than 700 [pipelines\nrunning](https://gitlab.com/gitlab-org/gitlab/-/pipelines?page=1&scope=all&status=running).\nEach of these pipelines represent changes from team members and contributors\nfrom the wider community. All GitLab contributors must wait for the\npipelines to finish to make sure the change works and integrates with the\nrest of the product. We want our pipelines to finish as fast as possible to\nmaintain the productivity of our teams.\n\n\nThis is why we constantly monitor the duration of our pipelines. For\nexample, in December 2020, successful merge request pipelines had a duration\nof [53.8\nminutes](/handbook/engineering/quality/performance-indicators/#average-merge-request-pipeline-duration-for-gitlab):\n\n\n![Average pipeline duration was 53.8 minutes in\nDecember](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/historical-pipeline-duration.png){:\n.shadow.medium.center}\n\nThe average pipeline took 53.8 minutes to finish in December.\n\n{: .note.text-center}\n\n\nGiven that we run [around 500 merge request\npipelines](https://gitlab.com/gitlab-org/gitlab/-/pipelines/charts) per day,\nwe want to know: Can we optimize our process to change how long-running jobs\n_run_?\n\n\n## How we fixed our bottleneck jobs by making them run in parallel\n\n\nThe `frontend-fixtures` job uses `rspec` to generate mock data files, which\nare then saved as files called \"fixtures\". 
These files are loaded by our\nfrontend tests, so the `frontend-fixtures` must finish before any of our\nfrontend tests can start.\n\n\n> As not all of our tests need these frontend fixtures, many jobs use the\n[`needs` keyword](https://docs.gitlab.com/ee/ci/yaml/#needs) to start before\nthe `frontend-fixtures` job is done.\n\n\nIn our pipelines, this job looked like this:\n\n\n![The `frontend-fixtures`\njob](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job.png){:\n.shadow.medium.center}\n\nInside the frontend fixtures job.\n\n{: .note.text-center}\n\n\n\nThis job had a normal duration of 20 minutes, and each individual fixture\ncould be generated independently, so we knew there was an opportunity to run\nthis process in parallel.\n\n\nThe next step was to configure our pipeline to split the job into multiple\nbatches that could be run in parallel.\n\n\n## How to make frontend-fixtures a parallel job\n\n\nFortunately, GitLab CI provides an easy way to run a job in parallel using\nthe [`parallel` keyword](https://docs.gitlab.com/ee/ci/yaml/#parallel). In\nthe background, this creates \"clones\" of the same job, so that multiple\ncopies of it can run simultaneously.\n\n\n**Before:**\n\n\n```yml\n\nfrontend-fixtures:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n```\n\n\n**After:**\n\n\n```yml\n\nrspec-ee frontend_fixture:\n  extends:\n    - .frontend-fixtures-base\n    - .frontend:rules:default-frontend-jobs\n  parallel: 2\n```\n\n\nYou will notice two changes. 
First, we changed the name of the job, so our\njob is picked up by [Knapsack](https://docs.knapsackpro.com/ruby/knapsack)\n(more on that later), and then we add the keyword `parallel`, so the job\ngets duplicated and runs in parallel.\n\n\nThe new jobs that are generated look like this:\n\n\n![Our fixtures job running in\nparallel](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-parallel.png){:\n.shadow.medium.center}\n\nThe new jobs that are picked up by Knapsack and run in parallel.\n\n{: .note.text-center}\n\n\nAs we used a value of `parallel: 2`, actually two jobs are generated with\nthe names:\n\n\n- `rspec-ee frontend_fixture 1/2`\n\n- `rspec-ee frontend_fixture 2/2`\n\n\nOur two \"generated\" jobs, now take three and 17 minutes respectively, giving\nus an overall decrease of about three minutes.\n\n\n![Two parallel jobs in the\npipeline](https://about.gitlab.com/images/blogimages/using-run-parallel-jobs/fixtures-job-detail.png){:\n.shadow.medium.center}\n\nThe parallel jobs that are running in the pipeline.\n\n{: .note.text-center}\n\n\n## Another way we optimized the process\n\n\nAs we use Knapsack to distribute the test files among the parallel jobs, we\nwere able to make more improvements by reducing the time it takes our\nlongest-running fixtures-generator file to run.\n\n\nWe did this by splitting the file into smaller batches and optimizing it, so\nwe have more tests running in parallel, which shaved off an additional [~3.5\nminutes](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158#note_460372560).\n\n\n## Tips for running parallel jobs\n\n\nIf you want to ramp up your productivity you can leverage `parallel` on your\npipelines by following these tips:\n\n\n1. Measure the time your pipelines take to run and identify possible\nbottlenecks to your jobs. You can do this by checking which jobs are slower\nthan others.\n\n1. 
Once your slow jobs are identified, try to figure out if they can be run\nindependently from each other or in batches.\n   - Automated tests are usually good candidates, as they tend to be self-contained and run in parallel anyway.\n1. Add the `parallel` keyword, while measuring the outcome over the next few\nrunning pipelines.\n\n\n## Learn more about our solution\n\n\nWe discuss how running jobs in parallel improved the speed of pipelines on\nGitLab Unfiltered.\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/hKsVH_ZhSAk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nAnd here are links to some of the resources we used to run pipelines in\nparallel:\n\n\n- The [merge request that introduced `parallel` to\nfixtures](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/46959).\n\n- An important [optimization\nfollow-up](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47158) to\nmake one of the slow tests faster.\n\n- The [Knapsack gem](https://docs.knapsackpro.com/ruby/knapsack), which we\nleverage to split the tests more evenly in multiple CI nodes.\n\n\nAnd many thanks to [Rémy Coutable](/company/team/#rymai), who helped me\nimplement this improvement.\n\n\nCover image by [@dustt](https://unsplash.com/@dustt) on\n[Unsplash](https://unsplash.com/photos/ZqBNb7xK5s8)\n\n{: .note}\n",[790,2025,4014,1074,9],"CD",{"slug":4016,"featured":6,"template":699},"using-run-parallel-jobs","content:en-us:blog:using-run-parallel-jobs.yml","Using Run Parallel 
Jobs","en-us/blog/using-run-parallel-jobs.yml","en-us/blog/using-run-parallel-jobs",{"_path":4022,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4023,"content":4029,"config":4035,"_id":4037,"_type":13,"title":4038,"_source":15,"_file":4039,"_stem":4040,"_extension":18},"/en-us/blog/value-stream-total-time-chart",{"title":4024,"description":4025,"ogTitle":4024,"ogDescription":4025,"noIndex":6,"ogImage":4026,"ogUrl":4027,"ogSiteName":685,"ogType":686,"canonicalUrls":4027,"schema":4028},"Value stream optimization with GitLab's Total Time Chart","Learn how this new analytics feature provides immediate insights about the time spent in each stage of your workstream.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/value-stream-total-time-chart","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Value stream management: Total Time Chart simplifies top-down optimization flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-01\",\n      }",{"title":4030,"description":4025,"authors":4031,"heroImage":4026,"date":4032,"body":4033,"category":693,"tags":4034},"Value stream management: Total Time Chart simplifies top-down optimization flow",[1660],"2023-06-01","\n\nUnderstanding where time is spent during the development lifecycle is a crucial insight for software leaders when optimizing the value delivery to customers. Our new Value Stream Analytics Total Time Chart is a visualization that helps managers uncover how long it actually takes to complete the development process from idea to production. 
Managers also can learn how much time teams spend in each stage of the workflow.\n \n![The VSA Total Time Chart displays the average time to complete each value stream stage.](https://about.gitlab.com/images/blogimages/2023-05-07-vsa-overview.gif){: .shadow}\nValue Stream Analytics Total Time Chart\n{: .note.text-center}\n\nValue Stream Analytics is available out of the box in the GitLab platform. It surfaces the process and value delivery metrics through the unified data model that stores all the records around development efforts. Value Stream Analytics uses a backend process to collect and aggregate stage-level data into [three core objects](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#how-value-stream-analytics-works):\n\n- Value streams - container objects with stage list \n- Value stream stage - an event pair of start and end events\n- Value stream stage events - the smallest building blocks of the value stream. For example, from Issue created to Issue first added to board. See the [list of available stage events](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events).\n\n> [Register for the GitLab 16 webinar](/sixteen/), where we will unveil the latest innovations in our AI-powered DevSecOps platform.\n\nWe added in the new chart the stages breakdown as a stacked area chart to make it easier to understand how each stage contributes to the total time, and how that changes over time. Each area in the chart represents a stage. By comparing the heights of each area, you can get an idea about how each stage contributes to the total time of the value stream. We also added a tool tip with the stages breakdown sorted top to bottom, to help you understand the stages in their correct order.\n\nThe new chart is available in the Value Stream Analytics Overview page (on the left sidebar, select **Analytics > Value stream**). This page includes four sections:\n  1.  
Data filter text box - on the top of the Overview page you can use the [Data filters](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#data-filters) to view data that matches specific criteria or date range. \n  2. Stage navigation bar - below the filter text box you can use the stage navigation bar to investigate what happened in the specific stage and to identify the items (issues/MRs) that are slowing down the stage time.\n  3. Key metrics tiles - the summary of the stream performance is displayed, above the chart in the [Key metrics tiles](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#key-metrics). \n  4. Overview charts - the newly added Total Time Chart and the [Task by type](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-tasks-by-type) chart. \n\nBut that's not all. The Total Time Chart also simplifies the top-down optimization flow, starting from the Value Streams Dashboard organization-level view to a drill-down into the performance of each project:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n\nFrom the Value Stream Analytics overview page, you can drill down from Key metrics tiles into other GitLab analytics pages for deeper investigations. You can also go up to the Value Streams Dashboard, or investigate the [DORA metrics](/solutions/value-stream-management/dora/) that are also available in the new dashboard.\n\nIt's important to note that the chart data is limited to items completed within the selected date range. Also, there could be points in time with no [\"stage event\"](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) actions. In these cases, the chart will display a dashed line to represent the missing data. 
These gaps can add contextual information about the workstream, and usually do not represent interruptions in the data. When there is \"no data\" for a specific stage, the stage line will be flat.\n\nTo learn more check out the [Value Stream Analytics documentation](https://docs.gitlab.com/ee/user/group/value_stream_analytics/).\n\nWith the Value Stream Analytics Total Time Chart, you get immediate insights about the time spent in each stage over time to determine if progress is being made. Try it out today and see the difference it can make in your workstream!\n",[744,722,834,1074,9],{"slug":4036,"featured":6,"template":699},"value-stream-total-time-chart","content:en-us:blog:value-stream-total-time-chart.yml","Value Stream Total Time Chart","en-us/blog/value-stream-total-time-chart.yml","en-us/blog/value-stream-total-time-chart",{"_path":4042,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4043,"content":4049,"config":4056,"_id":4058,"_type":13,"title":4059,"_source":15,"_file":4060,"_stem":4061,"_extension":18},"/en-us/blog/verizon-customer-story",{"title":4044,"description":4045,"ogTitle":4044,"ogDescription":4045,"noIndex":6,"ogImage":4046,"ogUrl":4047,"ogSiteName":685,"ogType":686,"canonicalUrls":4047,"schema":4048},"Verizon cuts datacenter rebuilds from 30 days to 8 hours","Verizon utilized microservices, automation, and GitLab to reduce datacenter rebuilds to under 8 hours.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678933/Blog/Hero%20Images/verizon_video_blog.jpg","https://about.gitlab.com/blog/verizon-customer-story","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kim Lock\"}],\n        \"datePublished\": \"2019-02-14\",\n      
}",{"title":4050,"description":4045,"authors":4051,"heroImage":4046,"date":4053,"body":4054,"category":1584,"tags":4055},"How Verizon Connect reduced datacenter rebuilds from 30 days to under 8 hours with GitLab",[4052],"Kim Lock","2019-02-14","\nIn 2016, the [Verizon Connect](https://www.verizonconnect.com/) Telematics Container Cloud Platform team was struggling with data center\nbuilds that took 30 days. Working with legacy systems that included Java-based, monolithic\napplications, they also had a variety of disparate tools including BitBucket, Jenkins, and Jira\nin use throughout their environment.\n\n### Starting from scratch to move to microservices and increase automation\n\nThe group looked to move to a [microservices architecture](/blog/strategies-microservices-architecture/) to improve deploy speed and increase\nautomation. They also wanted to overcome manual errors, disjointed processes, and\nmanual deploys. \"We were just spending too much time doing stuff manually, so we decided\nto just start fresh and write everything from scratch,\" says Mohammed Mehdi, Principal DevOps, Verizon.\n\nAs they created this new infrastructure, they kept four key components in mind: architecture,\nautomation, extensibility, and being proactive and prepared for the future. They wanted to rebuild\ntheir data centers in less than 12 hours, instead of 30 days. 
They had a goal of 100 percent CI/CD.\nThey wanted to remove manual deployments, especially around the server and network deployments.\nThe team also focused on avoiding vendor lock-in by seeking open source tools to help them accomplish these goals.\n\nThe team looked to improve automation by focusing on simplification, standardization, and providing end-to-end visibility.\n\"We wanted easily repeatable, with zero-touch, zero-downtime deployments, automated tracking,\" Mehdi explains.\n\n### A single solution to meet their needs\n\nThe team chose GitLab to support this infrastructure initiative because it met a number of their qualifications, including being open source and offering Windows support. The team liked that it is easy to use and the UI easy to understand.\n\n\"Some of the other features that we really loved, and we didn’t find with any other CI/CD tool, are the project management\nfeatures,\" Mehdi says. \"GitLab replaced a bunch of disparate systems for us like Jira, BitBucket, and Jenkins. GitLab\nprovided us with a one-stop solution.\"\n\nThe Verizon Connect Telematics Container Cloud Platform team is using GitLab for:\n\n- [Code review](/blog/demo-mastering-code-review-with-gitlab/)\n- [CI/CD](/solutions/continuous-integration/)\n- [Issue tracking](/pricing/feature-comparison/)\n- [Source Code Management](/solutions/source-code-management/)\n- [Audit Management](https://docs.gitlab.com/ee/administration/audit_events.html)\n- [ChatOps](https://docs.gitlab.com/ee/ci/chatops/)\n\nThe team has successfully achieved deployment flexibility and are platform agnostic. They now have\nstreamlined processes and developers can truly focus on differentiating tasks.\n\nThe team was able to reduce their complete datacenter deploy\nprocess to under eight hours because of the streamlined deploy and build processes\nthey enabled using GitLab. 
Learn how [Verizon Connect](https://www.verizonconnect.com/) is achieving this success by watching\nmore about their story and how they achieved their targets in [the YouTube video](https://youtu.be/zxMFaw5j6Zs) below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zxMFaw5j6Zs\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThanks for giving GitLab a shot, Verizon Connect!\n\nCover image by [chuttersnap](https://unsplash.com/@chuttersnap) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1096,108,722,1364,9],{"slug":4057,"featured":6,"template":699},"verizon-customer-story","content:en-us:blog:verizon-customer-story.yml","Verizon Customer Story","en-us/blog/verizon-customer-story.yml","en-us/blog/verizon-customer-story",{"_path":4063,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4064,"content":4070,"config":4075,"_id":4077,"_type":13,"title":4078,"_source":15,"_file":4079,"_stem":4080,"_extension":18},"/en-us/blog/wag-labs-blog-post",{"title":4065,"description":4066,"ogTitle":4065,"ogDescription":4066,"noIndex":6,"ogImage":4067,"ogUrl":4068,"ogSiteName":685,"ogType":686,"canonicalUrls":4068,"schema":4069},"How Wag! cut their release process from 40 minutes to just 6","The popular dog-walking app is rolling out new features faster and with more confidence as they adopt GitLab for more of their DevOps workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678923/Blog/Hero%20Images/dog-walking.jpg","https://about.gitlab.com/blog/wag-labs-blog-post","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Wag! 
cut their release process from 40 minutes to just 6\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2019-01-16\",\n      }",{"title":4065,"description":4066,"authors":4071,"heroImage":4067,"date":4072,"body":4073,"category":1584,"tags":4074},[1680],"2019-01-16","\nDo you own a dog and work outside of the home? If you do, or even just know someone who does, you know that finding a trustworthy caretaker is of the utmost importance. With dog walkers in cities and towns across the U.S., the folks at [Wag!](https://wagwalking.com/about) have proven to be a source of reliable caretakers for countless fur parents. In three years, the company has powered more than one billion walks via its app for on-demand dog walking, sitting, and boarding, that boasts of millions of users.\n\nWag! recently signed on with GitLab to make the most of their engineering hours and bring their customers new features and updates at a faster clip.\n\n### From version control, to CI, to the full pipeline\n\nHaving previously used GitLab as their main source of truth for repositories, Wag! initially planned to return to the app solely for [continuous integration (CI)](/solutions/continuous-integration/). But after giving it a whirl, they quickly expanded their strategy to include the use of other features.\n\n\"We started our GitLab project about seven or eight months ago,\" explains [Dave Bullock](https://www.linkedin.com/in/eecue), director of engineering at Wag! \"The original idea was to just use it as our CI platform. But as we built that out, we started using it for more and more tasks, and ended up using it for our full [CI/CD pipeline](/topics/ci-cd/). That includes both our application, so the CI/CD that powers the API, along with our infrastructure. We use GitLab with Terraform to test, review, save, and deploy all of our infrastructure as well as the application on two separate pipelines. 
Every team uses it in their application, whether it's the Android application, the web application, the API, or our infrastructure; it's all being tested, built, and deployed through GitLab.\"\n\n### Streamlining to a single application\n\nPart of GitLab's appeal stemmed from the [ability to do everything in one place](/topics/single-application/). Wag! was searching for an [integrated solution](/solutions/continuous-integration/) that would streamline their development process, and they found it in GitLab.\n\n\"We were previously using a combination of Travis and other random technologies, and we just wanted something with a little bit better interface, a little more control, and something that we owned as far as the hosting and the management,\" says Bullock. \"We really wanted to move towards a single, full-service application.\"\n\n>\"We just wanted something with a better interface, a little more control, and something that we owned as far as the hosting and the management. We really wanted to move towards a single, full-service application.\"\n\nThe impact of that choice is also being felt on the infrastructure side. Wag!'s infrastructure engineers no longer have to manually stage and test their work. They are now following the same basic workflow that is used for their app, while integrating Terraform to manage their infrastructure.\n\n\"Basically, one of our DevOps team members will make a change, cut a pull request, and it'll be reviewed by the team. If it looks good, we'll say, 'Okay, cool. Merge it into master,'\" Bullock explains. \"If it's one of the modules, we'll tag that module, update the reference to it, and then the CI pipeline will kick off. It'll test the syntax, look for any security issues, and alert a Slack channel if there are any. It'll then stage a full version of the environment and test it. 
So, it stages all the pieces: the database, cache, and everything else, and tests it all to make sure that it works, just like we would be testing our production website.\n\n\"If that passes, then it allows you to see what your changes are going to do before you apply them,\" he continues. \"We call it Terraform plan. So, it runs Terraform plan on each piece of our infrastructure, and it'll tell us something like, 'Hey, we see 34 changes and 2 destructions and 1 creation in this environment. Click here to review.' Then the group will review it and if it looks good, we'll apply it in production. Having that as a full pipeline is really great.\"\n\n>“Now it's so easy to deploy something and roll it back if there's an issue. It's taken the stress and the fear out of deploying into production.” – Dave Bullock, Director of Engineering\n\n### Easy learning curve\n\nSome of the Wag! engineers had working experience with GitLab, while others had not. Nonetheless, Bullock found the onboarding of his teams to be a fairly easy process due to the intuitive nature of the interface.\n\n\"I think once you kind of understand how CI works, it's basically about following things step by step,\" he says. \"Pipelines were a new concept to a lot of the team, but once you see it happening visually, it's really easy to understand what's going on, expand and add to it. It's a really useful interface. Seeing all those green dots or red dots makes it really clear what's going on.\"\n\n### Built-in security, shaving down test times and faster releases\n\nAs part of their ramp up in GitLab, the dog-walking service recently furled [automated security scanning and license management](/solutions/security-compliance/) into their workflow, with Bullock noting how \"great\" it is to have those features baked into the pipeline so that immediate action can be taken when needed.\n\nWag! currently issues three releases a day, with plans to bump that number up to eight or more. 
Since adopting GitLab, they have seen a massive improvement in the amount of time spent on the release process. **What previously took 40 minutes to an hour to accomplish, now takes just six minutes.**\n\n\"Traditionally, the release process was slow, fragile, and limited to only a few key release engineers who had access to 10 different systems to monitor, make changes, and log into to make updates and pull in the latest code. It was not optimal. Now it's literally a single pane of glass. A lot of it just happens automatically when you merge `develop` into `master` and tag it.\"\n\nThe release process time should improve even more once Wag! engineers switch from manually pushing parts of the release through to automating the process.\n\n\"Right now, we're still clicking through the interface and saying, 'Okay, do this, now let's monitor,'\" says Bullock. \"But I think as we become more comfortable with it, we'll go to fully automated deployments. Literally, just let it go and deploy. If we see an uptick in errors, we'll let it roll back on its own. But as it is now, it's so easy to deploy something and roll it back ourselves if there's an issue. It's taken the stress and the fear out of deploying into production.\"\n\n### Adopting DevOps\n\nWag!'s engineering team has big plans for 2019. They are currently in the process of moving their repositories from GitHub to GitLab and are planning to switch from Amazon ECS to [Kubernetes](/solutions/kubernetes/). This is all part of their roadmap to implementing DevOps.\n\n\"I think we're going to start working on the project in Q1 and it will be really awesome to have all the bells and functionality,\" Bullock says. \"We're excited about Auto DevOps and a lot of new things GitLab has coming down the pipeline. We're going to push pretty hard on that this year.\n\n\"I'm a big fan of DevOps in general, so I think the closer that you can bring the development engineers to the ops side, the better things work,\" he adds. 
\"I would love for every software engineer or backend engineer to take ownership of the environment that their code runs in, or at least be able to experiment with it and kind of instantly just spin up a full working environment that is the same as our production environment, which we do now, but not with Kubernetes. I think removing that friction is great.\"\n\n### Growing with GitLab\n\nGitLab's releases are a treat the folks at Wag! look forward to checking out each month. The rollout of new features, which are partly determined by user feedback, tend to correlate with the engineering needs of the growing dog-walking and boarding service.\n\n\"I think it's exciting that as we're growing and adding interesting pieces to our infrastructure and application, we're seeing GitLab grow with your monthly release cycles,\" says Bullock. \"Every month there's some new stuff that we're like, 'Oh cool, we could use that, that's perfect.' It's nice to have GitLab as a partner that's growing with us, and it's exciting to see the parallels of new features that you're launching and how it's solving our problems and optimizing things. 
There's all kinds of cool stuff, and every time we start using a new piece of GitLab, I feel like, 'Okay, that's great, we're really getting our money’s worth.'\"\n\nPhoto by [Andrii Podilnyk](https://unsplash.com/photos/dWSl8REfpoQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/dog-walk?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[1096,2025,696,722,787,789,1364,9],{"slug":4076,"featured":6,"template":699},"wag-labs-blog-post","content:en-us:blog:wag-labs-blog-post.yml","Wag Labs Blog Post","en-us/blog/wag-labs-blog-post.yml","en-us/blog/wag-labs-blog-post",{"_path":4082,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4083,"content":4089,"config":4095,"_id":4097,"_type":13,"title":4098,"_source":15,"_file":4099,"_stem":4100,"_extension":18},"/en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together",{"title":4084,"description":4085,"ogTitle":4084,"ogDescription":4085,"noIndex":6,"ogImage":4086,"ogUrl":4087,"ogSiteName":685,"ogType":686,"canonicalUrls":4087,"schema":4088},"Developing secure software: Top tips for dev-sec integration","Every DevOps team wants secure software development but it's surprisingly hard to achieve. Here are 5 strategies to bring dev and sec together.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679444/Blog/Hero%20Images/twotogether.jpg","https://about.gitlab.com/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Want secure software development? 
Our top 5 tips to bring dev and sec together\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-01-10\",\n      }",{"title":4090,"description":4085,"authors":4091,"heroImage":4086,"date":4092,"body":4093,"category":718,"tags":4094},"Want secure software development? Our top 5 tips to bring dev and sec together",[2836],"2022-01-10","\nThe most productive DevOps teams achieve secure software development by baking sec in from the start. That’s a worthwhile goal, but the reality is developers and security teams don’t always get along. From squabbles around where the buck stops to finger-pointing about finding and fixing bugs, dev and sec often struggle to get on the same page.\n\nAt a time when the security stakes have never been higher, dev and sec simply have to figure it out.\n\nHere are our top five tips to [bridge the gap between dev and sec](/blog/developer-security-divide/) and truly welcome security into the DevOps fold.\n\n**1. Forget the past**\n\nIn the bad, old days, a security officer swooped in when code was hitting production to point out problems and demand changes, often with little to no context or explanation. Developers didn’t exactly jump all over themselves to cooperate. TL;DR there’s plenty of blame to explain the lack of secure software development.\n\t\nThankfully, DevOps and modern application development bring fresh narratives and workflows. Nearly 28% of security pros now work in cross-functional DevOps teams, according to our [2021 Global DevSecOps Survey](/developer-survey/). And over 70% have shifted security left, the survey found. \n\nWhat’s the secret to their success? It’s all about DevOps and the [technology changes required to do it successfully](https://about.gitlab.com/blog/elite-team-strategies-to-secure-software-supply-chains/). 
Our survey found that teams settled on DevOps for better code quality and faster release times, but the tech choices to support that success – automated testing, security scans, and shift-left security – actually ended up bringing dev and sec closer together.\n\n_The takeaway_: The right technology is surprisingly helpful in breaking down stereotypes.\n\n**2. Learn each other’s languages**\n\nClearly, dev and sec have an ongoing communication problem. \n\nIn fact, they can’t even agree on who “owns” security, as we saw in our survey. A sec pro told us, “Security must be a practice of every member of the team from the front-end developer to the system administrator (and also non-tech roles),” while a dev said, “It’s all up to the developer!”\n\nWork needs to happen, and it starts with the very old-fashioned concept of getting to know one another. A sec pro could attend a developer meet-up, and a dev could sit in on a security retro. For some teams, this is going to have to be a forced function where management mandates cross-functional “lunch and learns,” virtual offsites, or even ice breakers.\n\n_The takeaway_: Yes, even an escape room (or other bonding exercises) can [help a team start to speak the same language](https://blog.hslu.ch/majorobm/2019/03/15/escape-rooms-a-great-team-building-activity/).\n\n**3. Institute a security champions program**\n\nIf you can’t beat them, join them, or in this case, embed them. [Developer security champions]( https://devops.com/devops-security-champion-who-what-and-why/) are known and trusted devs who have an interest and enthusiasm for security and want to share it with colleagues. This can be a very successful strategy to actually shift security left and change mindsets forever. \n\nSecurity champions can be part of a formalized program led by the sec team, or grow in a more organic fashion via an enthusiastic dev. 
Either way, [experts suggest this is a solid way to bring a DevOps team to DevSecOps](/blog/why-security-champions/).\n\n_The takeaway_: Sometimes the message is heard and understood most clearly from an insider.\n\n**4. Meet dev and sec where they are**\n\nIt’s tough to hold a dev accountable for security problems when the vast majority of them aren’t taught about it in college. And sec pros don’t necessarily know how to code. So is it any surprise that two very different skill sets, degree programs, and job requirements might find it hard to come together?\n\nIt’s not surprising but it is problematic. [One solution](https://techbeacon.com/security/why-developers-dislike-security-what-you-can-do-about-it) involves both sides (figuratively) going back to school. Devs can get hands-on training in security, while sec pros learn how to code.\n\nAlso DevOps managers might consider adding [“security software developer”](https://cybersecurityguide.org/careers/security-software-developer/) to the 2022 roster. This fairly new job title has [over 1,000 postings on Glassdoor.com](https://www.glassdoor.com/Job/united-states-security-software-engineer-jobs-SRCH_IL.0,13_IN1_KO14,40.htm?clickSource=searchBox).\n\n_The takeaway_: Continuing education and cross-functional training can yield enormous benefits.\n\n**5. Make the experience real**\n\nActions can speak louder than words, so why not let developers experience, first-hand, what’s involved in a security breach (and, by implication, what the stakes are)? Invite devs to every hacking exercise planned, and get extra points if a [security red team](https://csrc.nist.gov/glossary/term/red_team_blue_team_approach) is involved. \n\t\nAt the same time, introduce security pros to the user experience (UX) team, and invite them to meet with actual users and hear real-time feedback. 
\n\n_The takeaway_: It’s impossible to feel anything but invested if you truly feel like you’re part of the process.\n",[722,787,9],{"slug":4096,"featured":6,"template":699},"want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together","content:en-us:blog:want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together.yml","Want Secure Software Development Our Top 5 Tips To Bring Dev And Sec Together","en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together.yml","en-us/blog/want-secure-software-development-our-top-5-tips-to-bring-dev-and-sec-together",{"_path":4102,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4103,"content":4109,"config":4114,"_id":4116,"_type":13,"title":4117,"_source":15,"_file":4118,"_stem":4119,"_extension":18},"/en-us/blog/ways-to-encourage-collaboration",{"title":4104,"description":4105,"ogTitle":4104,"ogDescription":4105,"noIndex":6,"ogImage":4106,"ogUrl":4107,"ogSiteName":685,"ogType":686,"canonicalUrls":4107,"schema":4108},"3 Ways to foster collaboration","Want to know how we encourage everyone to contribute?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669991/Blog/Hero%20Images/ways-to-encourage-collaboration.jpg","https://about.gitlab.com/blog/ways-to-encourage-collaboration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to foster collaboration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-06-12\",\n      }",{"title":4104,"description":4105,"authors":4110,"heroImage":4106,"date":4111,"body":4112,"category":811,"tags":4113},[1133],"2017-06-12","\n\nWe know that [collaboration is critical](/blog/why-collaboration-tools-matter/) for organizations moving towards a DevOps culture. Here's how we encourage collaboration in our workflow at GitLab.\n\n\u003C!-- more -->\n\n## 1. 
We make suggesting changes less scary\n\nUsing version control for more than just your source code means that everyone feels free to contribute to documentation, configurations, tests and whatever else you're working on. With the benefit of [merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/), it's possible to suggest a change or an improvement, or even just query something that isn't entirely clear or could be described better, without just going ahead and making the change immediately. This invites discussion and prevents less experienced team members from feeling nervous to voice their opinions.\n\n>\"It really makes the documentation, similar to the source code, an open source and living document that everyone can contribute to.\" – GitLab Platform Backend Lead, [Douwe Maan](/company/team/#DouweM)\n\n## 2. We open our development platform\n\nBy giving everyone in your organization access to view what other teams are working on, you allow everyone to discover and contribute beyond their own projects. This [inner sourcing](https://en.wikipedia.org/wiki/Inner_source) approach makes it more likely that team members can learn from others or offer suggestions from their own experience that could be applied to a different project, avoiding duplication of work. Douwe explains: \"It's working together to make all of our code better, because if we use a shared library – even if it’s just an internal one – if one person improves it or fixes a bug or increases the functionality of that application, that’s work by one person that will immediately affect all the different teams.\"\n\n## 3. We make code review impersonal\n\nEveryone is encouraged to [review each other's code](https://www.youtube.com/watch?v=XluG9mAQdSo&feature=youtu.be) or ask for input, and the focus of that review is firmly on improving the code. The approach is not to say, \"This is wrong, change it to this,\" which can be really demotivating. 
We use language like, \"Have you considered this?\" or \"What do you think about this?\"\n\nThis not only makes code review less scary for the person whose merge request is being reviewed, it also makes it less intimidating for other team members to weigh in on more senior team members' work.\n\n>\"Review is really something we all do together. Even the most junior person or just someone who doesn’t really know this part of the application yet, if they see something that doesn’t quite look right to them or something they might have a question about, it’s really useful if you make them feel free to comment on that.\" - Douwe\n\nBy removing the barriers to contribution and making it easy and encouraged to offer input, even where team members have less experience, we've built a culture around collaboration and learning from others' expertise. Fostering collaboration across different teams and functions is just one element of a DevOps culture – to learn more, watch our webcast, \"[Managing the DevOps Culture Shift](https://www.youtube.com/watch?v=py8c6-3zyKM&feature=youtu.be)\" on demand now.\n\n*How does your team encourage everyone to contribute? 
Tell us in the comments!*\n\n\u003C!-- cover image: https://unsplash.com/search/street-art?photo=PVw_vtpCGaM-->\n",[1137,696,9,722],{"slug":4115,"featured":6,"template":699},"ways-to-encourage-collaboration","content:en-us:blog:ways-to-encourage-collaboration.yml","Ways To Encourage Collaboration","en-us/blog/ways-to-encourage-collaboration.yml","en-us/blog/ways-to-encourage-collaboration",{"_path":4121,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4122,"content":4128,"config":4133,"_id":4135,"_type":13,"title":4136,"_source":15,"_file":4137,"_stem":4138,"_extension":18},"/en-us/blog/what-is-gitlab-flow",{"title":4123,"description":4124,"ogTitle":4123,"ogDescription":4124,"noIndex":6,"ogImage":4125,"ogUrl":4126,"ogSiteName":685,"ogType":686,"canonicalUrls":4126,"schema":4127},"The problem with Git flow","Learn why Git flow complicates the lifecycle and discover an alternative to streamline development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681121/Blog/Hero%20Images/whatisgitlabflow.jpg","https://about.gitlab.com/blog/what-is-gitlab-flow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The problem with Git flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-03-05\",\n      }",{"title":4123,"description":4124,"authors":4129,"heroImage":4125,"date":4130,"body":4131,"category":1584,"tags":4132},[852],"2020-03-05","\n  \u003Cscript type=\"application/ld+json\">\n  {\n    \"@context\": \"https://schema.org\",\n    \"@type\": \"BlogPosting\",\n    \"mainEntityOfPage\": {\n      \"@type\": \"WebPage\",\n      \"@id\": \"https://about.gitlab.com/blog/what-is-gitlab-flow/\"\n    },\n    \"headline\": \"The problem with Git flow\",\n    \"description\": \"Learn why Git flow complicates the lifecycle and discover an alternative to streamline development.\",\n    \"image\": 
\"https://about.gitlab.com/images/blogimages/whatisgitlabflow.jpg\",\n    \"author\": {\n      \"@type\": \"Organization\",\n      \"name\": \"GitLab\"\n    },\n    \"publisher\": {\n      \"@type\": \"Organization\",\n      \"name\": \"\",\n      \"logo\": {\n        \"@type\": \"ImageObject\",\n        \"url\": \"\"\n      }\n    },\n    \"datePublished\": \"2020-03-05\"\n  }\n  \u003C/script>\n\nSometimes, you can have too much of a good thing. That’s certainly true with [Git flow](https://nvie.com/posts/a-successful-git-branching-model/), a well-known software development workflow that offers several options but can bog down users.\n\nWe developed [GitLab Flow](/topics/version-control/what-is-gitlab-flow/) as the solution to eliminate messy complexity and streamline the development process. [GitLab Flow](/topics/version-control/what-is-gitlab-flow/) brings issue tracking to the Git workflow, simplifying the process and removing confusion.\n\n## The problem with Git flow\n\nTo understand how GitLab Flow works, it’s helpful to start by looking at the problems it tries to solve. In Git flow, there are two main pain points, both of which involve unnecessary branch switching.\n\nGit flow forces developers to use the `develop` branch rather than the `master` or default branch. Because most tools default to using the master, there’s a significant amount of branch switching involved. Another frustrating aspect are `release` and [hotfix](https://stackoverflow.com/questions/46729813/how-to-use-a-Gitflow-hotfix-branch) branches, which are overkill for most organizations and completely unnecessary in companies practicing continuous integration and continuous delivery.\n\nThat brings us to GitLab Flow, a simpler workflow and branching model that keeps everything simple and inclusive.\n\n## GitLab Flow: a streamlined branching strategy\n\nGitLab Flow is a simpler alternative to Git flow that combines feature-driven development and feature branches with issue tracking. 
GitLab Flow integrates the Git workflow with an issue tracking system, offering a simple, transparent, and effective way to work with Git.\n\nGitLab Flow is an approach to make the relationship between the code and the issue tracker more transparent. Each change to the codebase starts with an issue in the issue tracking system. When you’re done coding or want to discuss the code, you can open a merge request. When the code is ready, the reviewer will merge the branch into master, creating a merge commit that makes this event easily visible in the future. Using GitLab Flow, teams can deploy a new version of code by merging master into a `production` branch, enabling them to quickly identify what code is in the production environment. In this workflow, commits only flow downstream, ensuring that everything is tested in all environments.\n\nGitLab Flow prevents the overhead of releasing, tagging, and merging that accompanies Git flow.\n\nGitLab Flow in a nutshell:\n- All features and fixes first go to master\n- Allows for `production` or `stable` branches\n- Bug fixes/hotfix patches are cherry-picked from master\n\nRead more here: [GitLab Flow best practices](/topics/version-control/what-are-gitlab-flow-best-practices/)\n\n## Breaking down the 10 stages of software development\n\nGitLab Flow is a way to move from the idea stage to production, all while keeping everyone informed and productive. We identified [10 key stages](/topics/version-control/what-is-gitlab-flow/#stages-of-software-development) of the development process that must happen in order for software to get into production. GitLab Flow makes it easy to account for all of them, while continuing to provide full visibility into the development lifecycle.\n\nBroadly speaking, GitLab Flow is broken down into three main areas: `feature` branch, `production` branch, and `release` branch.\n\nA `feature` branch is where the serious development work occurs. 
A developer creates a feature or bug fix branch and does all the work there rather than on a master branch. Once the work is complete, the developer creates a merge request to merge the work into the master branch.\n\nThe `production` branch is essentially a monolith – a single long-running production `release` branch rather than individual branches. It’s possible to create a tag for each deployable version to keep track of those details easily.\n\nThe last piece, the `release` branch, is key if you release software to customers. With every new release, you’ll create a stable branch from master and decide on a tag. If you need to do a patch release, be sure to cherry-pick critical bug fixes first, and don’t commit them directly to the stable or long-lived branch.\n\n## Follow the rules\n\nWant to get the most out of GitLab Flow? Our CEO [Sid Sijbrandij](/company/team/#sytses) came up with [11 rules teams should always follow to achieve maximum efficiency](/topics/version-control/what-are-gitlab-flow-best-practices/). The article is worth a read in its entirety, but here are a few rules that are timely reminders of the importance of testing, even in a [CI environment](/solutions/continuous-integration/):\n\n* **Test all commits**: Don’t wait to test until everything has been merged into `master`. Test commits along the way to catch problems earlier in the process.\n* **And run _all_ tests on all the commits**, even if you have to run tests in parallel.\n* **Code reviews > merging into `master`.** Why wait? \"Don’t test everything at the end of the week,\" Sid writes. \"Do it on the spot, because you'll be more likely to catch things that could cause problems, and others will also be working to come up with solutions.\"\n\n## Take a deep dive\n\nTake a look at GitLab Flow in action! 
🍿\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/InKNIvky2KE\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n\nCover image by [Fabio Bracht](https://unsplash.com/@bracht?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/_z0DiiaIhB4)\n{: .note}\n",[1684,9,1385],{"slug":4134,"featured":6,"template":699},"what-is-gitlab-flow","content:en-us:blog:what-is-gitlab-flow.yml","What Is Gitlab Flow","en-us/blog/what-is-gitlab-flow.yml","en-us/blog/what-is-gitlab-flow",{"_path":4140,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4141,"content":4147,"config":4152,"_id":4154,"_type":13,"title":4155,"_source":15,"_file":4156,"_stem":4157,"_extension":18},"/en-us/blog/whats-wrong-with-devops",{"title":4142,"description":4143,"ogTitle":4142,"ogDescription":4143,"noIndex":6,"ogImage":4144,"ogUrl":4145,"ogSiteName":685,"ogType":686,"canonicalUrls":4145,"schema":4146},"3 things that are wrong with DevOps today","Why are collaboration woes, shift-left waste, and tooling admin costs still plaguing DevOps?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680211/Blog/Hero%20Images/what-is-wrong-with-devops.jpg","https://about.gitlab.com/blog/whats-wrong-with-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 things that are wrong with DevOps today\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Joel Krooswyk\"}],\n        \"datePublished\": \"2018-02-20\",\n      }",{"title":4142,"description":4143,"authors":4148,"heroImage":4144,"date":4149,"body":4150,"category":718,"tags":4151},[2021],"2018-02-20","\n\nI’m continually impressed by the benefits achieved by modern ways of working. 
Lean processes, [Conversational Development](http://conversationaldevelopment.com/), and automation have helped us ship more value, faster. Those achievements have led customers to expect a lot more from their service providers. DevOps has been critical to those gains, but we’ve got more work to do – DevOps still has its problems.\n\n\u003C!-- more -->\n\nI have the privilege of talking with GitLab users every day. We celebrate impressive technical achievements, work through complex problems with CI/CD, or discuss new needs for their organization. The needs and problems seem to align themselves to one of three different areas:\n\n## 1. The wall still stands\n\nDev and Ops are still at war in some environments. In just the past couple of weeks I’ve heard the lack of collaboration between these groups called “the wall,” a “chasm,” and a “joke” by people in both areas! We’re simply not communicating well enough yet. We’re disappointed that after this much investment, there’s still so much room for improvement. Development and Operations continue to use different tools and to follow different rules.\n\n>It's like we're really doing DevSecBizPerfOps\n\nBut it doesn't end there. Now we've got more people in the mix analyzing concerns like security, performance, and business metrics. It's like we're really doing DevSecBizPerfOps or some such thing, and so our flow continues to be interrupted. Silos continue to exist, if not multiply. It also feels like Ops hasn’t gotten enough love, which is why GitLab is working toward better Operations views as part of our [product vision](/blog/devops-strategy/) for 2018.\n\n## 2. Administration costs are still too high\n\nAs we continue to [shift left with build, test, and security](/solutions/security-compliance/), admin costs continue to rise. Developers are often being empowered at the cost of their own productivity. 
Administration efforts can actually consume [half a developer’s time](https://www.infoworld.com/article/2613762/application-development/software-engineers-spend-lots-of-time-not-building-software.html) each week! Unfortunately, this is a growing form of waste. A core DevOps goal is to reduce administration time, but the admin costs of DevOps tools can be some of the highest in the software development lifecycle ecosystem due to extensive plug-in architectures, support of quickly evolving environments, and asynchronous vendor update woes. We continually increase complexity and add requirements to existing stacks without looking for more modern solutions. Despite all the loss of time, I still hear commonly that there's no way to visualize the flow of the code from requirement to production, especially once code is committed to a repository.\n\nThe good news is that more of us are taking the time to re-examine our ecosystems because they've become bloated with a wide variety of tools from a wide variety of vendors for very specific purposes. I wouldn't consider the current trend to be a tooling consolidation so much as a streamlining or simplification of toolsets. Questions I hear most often tend to focus on optimizing our efficiency and reliability while minimizing administration of laborious plug-in and trigger-driven architectures. We're trending in the right direction.\n\n## 3. We're holding onto the past\n\nWe’ve spent and continue to spend billions on software tools annually. Tooling can be extremely costly! Sometimes we’ve invested so much money in old tooling that we simply can’t let it go. Too often we hold onto tools and processes just because we spent a lot of time and money on them while newer, time-saving products are available for less than the cost of the renewal of the old beasts. And so we hold onto the past as we try to implement new technologies. 
It’s no surprise that shoving new technology into old tools can generate enormous friction and unique problems.\n\n>It’s no surprise that shoving new technology into old tools can generate enormous friction and unique problems.\n\nPerhaps we bought best-in-breed tools. Those products commonly require excessive coding efforts to integrate and maintain because \"best in breed\" typically means we bought from a number of vendors. Interconnectivity of those tools typically doesn’t come out of the box. And of course, once the API is mentioned as a solution, the admin and maintenance burden increases once again. We spend a lot of money on specific solutions but inevitably end up with holes in our end-to-end process, too often as it relates to security or performance.\n\nBut this way of looking at tooling is beginning to change! I'm hearing more frequently that dramatic price increases, as well as the outsourcing of product maintenance and support, are triggering enterprises to reconsider the past. When we've invested all that time and money into a product, but that product then gets sold to three different parent companies within a decade, our ROI calculations lose their luster. Outsourcings and vendor-level product sales are being viewed as indicators of a potentially declining market. Enterprises are using that as a trigger to seek out updated tools for the years ahead, reducing cost and enabling modern workflows.\n\n## It all impacts delivery efficiency\n\nNo matter whether we’re talking about disappointment in collaboration, shift-left waste, or tooling admin costs, it comes down to this: it all negatively impacts our ability to deliver securely with speed and efficiency. 
If we truly want to meet and exceed the expectations of our customers, we’ll need to continually hone and improve our DevOps processes and tools to reflect modern ways of working.\n\n[Photo](https://unsplash.com/photos/suaBxarUnyo?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) by Caleb George on [Unsplash](https://unsplash.com/search/photos/wall?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[722,9,696],{"slug":4153,"featured":6,"template":699},"whats-wrong-with-devops","content:en-us:blog:whats-wrong-with-devops.yml","Whats Wrong With Devops","en-us/blog/whats-wrong-with-devops.yml","en-us/blog/whats-wrong-with-devops",{"_path":4159,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4160,"content":4166,"config":4171,"_id":4173,"_type":13,"title":4174,"_source":15,"_file":4175,"_stem":4176,"_extension":18},"/en-us/blog/why-gitlab-ci-cd",{"title":4161,"description":4162,"ogTitle":4161,"ogDescription":4162,"noIndex":6,"ogImage":4163,"ogUrl":4164,"ogSiteName":685,"ogType":686,"canonicalUrls":4164,"schema":4165},"Why GitLab CI/CD?","With GitLab’s out-of-the-box CI/CD, you can spend less time maintaining and more time creating.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678657/Blog/Hero%20Images/ci-cd-competitive-campaign-blog-cover.png","https://about.gitlab.com/blog/why-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab CI/CD?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-02\",\n      }",{"title":4161,"description":4162,"authors":4167,"heroImage":4163,"date":4168,"body":4169,"category":300,"tags":4170},[1113],"2019-04-02","\nDevOps speed is a competitive advantage for businesses. 
According to DORA, [companies that deploy more frequently perform better in the market](https://cloudplatformonline.com/2018-state-of-devops.html). Everyone wants to be able to do their jobs better and deploy more frequently, but as the organization grows, speed bumps keep getting in the way:\n\n*   **Too many integration points** – Connecting [CI/CD](/topics/ci-cd/) to all of the different tools in a [DevOps toolchain](/topics/devops/) is confusing and keeps adding more steps and more points of failure to the process.\n*   **Brittle tools** – We're spending more time maintaining and updating these tools than actually creating new features.\n*   **Slow modernization** – We want to leverage [microservices](/topics/microservices/) and [cloud native](/topics/cloud-native/) development, but we spend too much time putting out fires.\n\nWith these speed bumps come complicated workflows, lack of pipeline visibility, and confusion about processes. With the Total Cost of Ownership (TCO) going up as more resources go to maintenance, teams can't afford to innovate. As organizations scale, these complexities only get worse.\n\nThat sounds exhausting, doesn't it?\n\n## Current CI/CD tools\n\nAt GitLab, we love transparency so much we made it [one of our core values](https://handbook.gitlab.com/handbook/values/#transparency). It's also why [we list all other DevOps tools on our website](/competition/) (no, really). We think open and direct communication is the fastest way to get the feedback you need to make the right decisions. For DevOps teams, the right tools should make things easier but we've found that _more_ doesn't always mean _better_.\n\n### High maintenance\n\nIntegrating CI/CD tools with the rest of your toolchain can get complicated – managing and updating these tools regularly isn't any easier. 
Many teams rely on tool experts just to keep everything running smoothly.\n\n### Lack of cloud native compatibility\n\nAs more organizations look to leverage microservices and [cloud native](/topics/cloud-native/) development, they'll need CI/CD tools that support modern architecture. With some CI/CD platforms, teams still need additional plugins to connect to Kubernetes or a container registry. Teams using legacy CI/CD tools will need to upgrade in order to gain those cloud native capabilities.\n\n### Toolchain complexity\n\nToolchains sometimes have too much in common with [Rube Goldberg devices](https://www.youtube.com/watch?v=qybUFnY7Y8w). Adding on more applications, more platforms, and more handoffs increases complexity that slows down teams. Add to that the maintenance, plugin, and upgrade requirements to manage these separate tools, and productivity gets harder.\n\n## Why teams love GitLab CI/CD\n\nCI/CD tools should make engineers' lives easier by giving them greater visibility into their pipelines, without burdening them with complicated integrations and plugin maintenance. GitLab CI/CD is designed to be simple so teams can start using it right away.\n\n### Easy to use\n\nGitLab uses a YAML configuration that any developer can understand so you can build pipelines faster.\n\n### Cloud native CI/CD\n\nWith its built-in container registry and Kubernetes integration, GitLab supports cloud native development.\n\n### Simple architecture\n\nOne integrated application with one set of permissions.\n\n### Fast and efficient\n\nWith autoscaling runners, developers no longer have to wait on builds, and VMs spin up or down automatically to process queues at a lower cost.\n\n### Everything in one place\n\nGitLab CI/CD is already built into the same application that contains source code management, planning, monitoring, etc.\n\nAs a single application for the entire DevOps lifecycle, everything is in one conversation and visible across teams. 
With GitLab's out-of-the-box CI/CD, you can spend less time maintaining and more time creating. It's CI/CD that _just works_.\n\nWe invite you to explore GitLab CI/CD for yourself, and see why we were rated #1 in the Forrester CI Wave™.\n\n[Explore GitLab CI/CD](/solutions/continuous-integration/)\n{: .alert .alert-gitlab-purple.text-center}\n",[108,722,9],{"slug":4172,"featured":6,"template":699},"why-gitlab-ci-cd","content:en-us:blog:why-gitlab-ci-cd.yml","Why Gitlab Ci Cd","en-us/blog/why-gitlab-ci-cd.yml","en-us/blog/why-gitlab-ci-cd",{"_path":4178,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4179,"content":4184,"config":4189,"_id":4191,"_type":13,"title":4192,"_source":15,"_file":4193,"_stem":4194,"_extension":18},"/en-us/blog/why-improving-continuously-speeds-up-delivery",{"title":4180,"description":4181,"ogTitle":4180,"ogDescription":4181,"noIndex":6,"ogImage":924,"ogUrl":4182,"ogSiteName":685,"ogType":686,"canonicalUrls":4182,"schema":4183},"Why improving continuously speeds up delivery","How do you keep pace with rapid changes in technology? The answer is continuous improvement.","https://about.gitlab.com/blog/why-improving-continuously-speeds-up-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why improving continuously speeds up delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Jeremiah\"}],\n        \"datePublished\": \"2019-04-09\",\n      }",{"title":4180,"description":4181,"authors":4185,"heroImage":924,"date":4186,"body":4187,"category":718,"tags":4188},[929],"2019-04-09","\n\nI just finished Tom Friedman’s latest book “[Thank You for Being Late: An\nOptimist's Guide to Thriving in the Age of Accelerations](https://www.amazon.com/dp/B01F1Z0QHA),”\nin which he explores how our world is accelerating and everything is happening\nfaster and faster.  He explores the impact on business, society, economy, and\nenvironment. 
It’s a fantastic read – at times sobering and others exciting. I\nthink a fundamental takeaway from his research is that, from now on, business\nleaders must learn how to transform their organizations to operate at faster cycle\ntimes than ever before. While that sounds great, the obvious question is: How?\n\n## Operational efficiency and speed\n\nOne of the classic business books on operational efficiency and speed is Dr. Eli\nGoldratt’s classic, [“The Goal”](https://www.amazon.com/gp/product/0884271951).\nIn “The Goal,” the main character, Alex is a plant manager responsible for turning\naround a failing manufacturing plant. He learns a valuable lesson from his son’s\nscouting troop on a camping trip. As the group hikes into the woods, they spread\nout, because the slower hikers can’t keep up with the faster ones. No matter what\nAlex tries, he can't seem to keep them together. Then, he makes a small adjustment\nthat changes everything. He puts the slowest hiker in the front so that the entire\ntroop moves along at the speed of the slowest hiker. It’s the same in your\ndevelopment lifecycle: The fastest you can go depends on the most time-consuming\nstep in the [end-to-end value stream](/solutions/value-stream-management/).\n\nSo, how do you identify the most time-consuming step in your value stream? This\ndaunting task can be accomplished by adopting DevOps practices. In\n[“The Phoenix Project”](https://www.amazon.com/Phoenix-Project-DevOps-Helping-Business/dp/0988262592)\nand subsequent blog posts, Gene Kim describes the\n[“Three Ways”](https://itrevolution.com/the-three-ways-principles-underpinning-devops/)\nfrom which all DevOps patterns arise. These philosophies boil DevOps down to a set\nof three principles that can help organizations increase efficiency and speed by\ncarefully examining the value stream:\n\n1. **The First Way: Systems Thinking** – This first way is a flow of value from the business to the customer – or from Dev to Ops.\n1. 
**The Second Way: Amplify Feedback Loops** – The second way is to gather feedback from the customer, the business – or from Ops back to Dev.\n1. **The Third Way: Culture of Continual Experimentation and Learning** – Think of the third way as many smaller feedback loops of learning and improvement.\n\nWhat Alex learned in “The Goal” is an important lesson to remember: No matter\nwhat you change, you can only go as fast as the slowest. The same is true in your\nvalue stream. The principles of continuous improvement, exemplified by Gene’s\nThree Ways and [Kaizen](https://en.wikipedia.org/wiki/Kaizen) can be a powerful\nforce to help drive incremental and lasting change.\n\n## Continuous improvement through small changes\n\nWhy should you adopt a Kaizen approach?  Because it works. Kaizen is a strategy\nthat refers to continuous improvement through small changes that result in major\nimprovement. When applied in a business setting, Kaizen has significant impact\non culture, productivity, and quality.\n\nWhen teams practice continuous improvement, they;\n\n- Start with understanding their value stream.\n- Look for bottlenecks and waste.\n- Prioritize what to improve (remember the hikers).\n- Experiment with a minor change and learn.\n\nIn principle, continuous improvement and [DevOps isn’t difficult](/topics/devops/), if you approach\nit from a perspective of Kaizen and Gene Kim’s “Three Ways.” However, the\ncomplexity of fragmented toolchains and processes, siloed incentives, and lack\nof collaboration often get in the way of making lasting improvements in software\ndelivery.\n\n## Increase your DevOps success and reduce cycle time\n\nTo set the speed in the competitive race of software innovation, I have three suggestions:\n\n1. **Simplify your scope.** Focusing improvement efforts on one specific value \nstream at a time narrows your efforts to hone in on major problem areas rather\nthan becoming overwhelmed.\n1. 
**Empower your team.** Giving your delivery team the authority to experiment and\nimprove enables innovation to become a focus.   \n1. **Measure your value stream.** Understanding your cycle time and identifying \nbottlenecks enables you to take an objective look at what's slowing you down.\n\nIncreasing your DevOps success and reducing cycle time through continuous\nimprovement can help your organization continuously improve your value stream.\nAt GitLab, we’re helping teams reduce cycle time with our approach to DevOps,\nwhich unifies teams to focus on delivering value.\n\nAre you ready to reduce cycle\ntime? [Just commit.](/blog/strategies-to-reduce-cycle-times/)\n{: .alert .alert-gitlab-purple .text-center}\n",[722,9],{"slug":4190,"featured":6,"template":699},"why-improving-continuously-speeds-up-delivery","content:en-us:blog:why-improving-continuously-speeds-up-delivery.yml","Why Improving Continuously Speeds Up Delivery","en-us/blog/why-improving-continuously-speeds-up-delivery.yml","en-us/blog/why-improving-continuously-speeds-up-delivery",{"_path":4196,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4197,"content":4203,"config":4208,"_id":4210,"_type":13,"title":4211,"_source":15,"_file":4212,"_stem":4213,"_extension":18},"/en-us/blog/why-we-use-rails-to-build-gitlab",{"title":4198,"description":4199,"ogTitle":4198,"ogDescription":4199,"noIndex":6,"ogImage":4200,"ogUrl":4201,"ogSiteName":685,"ogType":686,"canonicalUrls":4201,"schema":4202},"Why we use Ruby on Rails to build GitLab","Here's our CEO on GitLab’s inception using Rails, and how challenges are being handled along the way.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668296/Blog/Hero%20Images/gitlab-ruby.jpg","https://about.gitlab.com/blog/why-we-use-rails-to-build-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why we use Ruby on Rails to build GitLab\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-10-29\",\n      }",{"title":4198,"description":4199,"authors":4204,"heroImage":4200,"date":4205,"body":4206,"category":300,"tags":4207},[1680],"2018-10-29","\nWhen our Co-founder and Engineering Fellow [Dmitriy Zaporozhets](/company/team/#dzaporozhets) decided to build GitLab, he chose to do it with Ruby on Rails, despite working primarily in PHP at the time. GitHub, a source of inspiration for GitLab, was also based on Rails, making it a logical pick considering his interest in the framework. GitLab CEO [Sid Sijbrandij](/company/team/#sytses) thinks his co-founder made a good choice:\n\n\"It's worked out really well because the Ruby on Rails ecosystem allows you to shape a lot of functionality at a high quality,\" he explained. \"If you look at GitLab, it has an enormous amount of functionality. Software development is very complex and to help with that, we need a lot of functionality and Ruby on Rails is a way to do it. Because there's all these best practices that are on your happy path, it’s also a way to keep the code consistent when you ship something like GitLab. You're kind of guided into doing the right thing.\"\n\n### Depending on useful gems\n\nRuby gems play an integral role in the building of GitLab, with it loading more than a thousand non-unique gems, according to Sid. Calling the Ruby on Rails framework \"very opinionated,\" he thinks it's a strong environment in which to build a complex app like GitLab.\n\n\"There's a great ecosystem around it with gems that can make assumptions about how you're doing things and in that regard, I think the Ruby on Rails ecosystem is still without par,\" he says. \"If you look at our Gemfile, it gives you an indication of how big the tower is of dependencies that we can build on. 
Ruby on Rails has amazing shoulders to stand on and it would have been much slower to develop GitLab in any other framework.\"\n\n### Overcoming challenges\n\nAll of this is not to say there haven’t been challenges in building GitLab with Ruby on Rails. Performance has been an issue that our developers have made strides to improve in a number of ways, including rewriting code in Go and [using the Vue framework](/blog/why-we-chose-vue/). The latter is being used to rewrite frequently accessed pages, like issues and merge requests, so they load faster, improving user experience.\n\nGo is being used to address other issues affecting load times and reduce memory usage.\n\n\"Ruby was optimized for the developer, not for running it in production,\" says Sid. \"For the things that get hit a lot and have to be very performant or that, for example, have to wait very long on a system IO, we rewrite those in Go … We are still trying to make GitLab use less memory. So, we'll need to enable multithreading. When we developed GitLab that was not common in the Ruby on Rails ecosystem. Now it's more common, but because we now have so much code and so many dependencies, it's going to be a longer path for us to get there. That should help; it won't make it blazingly fast, but at least it will use less memory.\"\n\nAdding Go to GitLab’s toolbox led to the creation of a separate service called [Gitaly](/blog/the-road-to-gitaly-1-0/), which handles all Git requests.\n\n### Building on GitLab’s mission\n\nThe organized, structured style of Ruby on Rails’ framework falls in line with our core mission. Because Rails is streamlined, anyone can jump into GitLab and participate, which made it especially attractive to Sid from the start.\n\n\"[Our mission is that everyone can contribute](/company/mission/#mission),\" he explains. 
\"Because Ruby on Rails is really opinionated about which pieces go where, it's much easier for new developers to get into the codebase, because you know where people have put stuff. For example, in every kitchen you enter, you never know where the knives and plates are located. But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n>In every kitchen you enter, you never know where the knives and plates are located. But with Ruby on Rails, you enter the kitchen and it's always in the same place, and we want to stick to that.\n\n\"I was really encouraged when I opened the project and saw it for the first time a year after Dmitriy started it. I opened it up and it's idiomatic Rails. He followed all the principles. He didn't try to experiment with some kind of fad that he was interested in. He made it into a production application. Dmitriy carefully vetted all the contributions to make sure they stick to those conventions, and that's still the case. I think we have a very nice codebase that allows other people to build on top of it. One of our sub-values is [boring solutions](https://handbook.gitlab.com/handbook/values/#efficiency): don't do anything fancy. This is so that others can build on top it. 
I think we've done that really well … and we're really thankful that Ruby has been such a stable, ecosystem for us to build on.\"\n\n[Cover image](https://unsplash.com/photos/0y6Y56Pw6DA) by [Elvir K](https://unsplash.com/@elvir) on Unsplash\n{: .note}\n",[696,268,2141,790,1074,743,9],{"slug":4209,"featured":6,"template":699},"why-we-use-rails-to-build-gitlab","content:en-us:blog:why-we-use-rails-to-build-gitlab.yml","Why We Use Rails To Build Gitlab","en-us/blog/why-we-use-rails-to-build-gitlab.yml","en-us/blog/why-we-use-rails-to-build-gitlab",{"_path":4215,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4216,"content":4222,"config":4227,"_id":4229,"_type":13,"title":4230,"_source":15,"_file":4231,"_stem":4232,"_extension":18},"/en-us/blog/why-your-code-review-process-is-broken-and-how-to-fix-it",{"title":4217,"description":4218,"ogTitle":4217,"ogDescription":4218,"noIndex":6,"ogImage":4219,"ogUrl":4220,"ogSiteName":685,"ogType":686,"canonicalUrls":4220,"schema":4221},"Why your code review process is broken, and how to fix it","What do you do when you follow your code review process, and you’re still rudely greeted by code full of bugs, or a flood of user complaints?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679775/Blog/Hero%20Images/why-your-code-review-process-is-broken-and-how-to-fix-it.jpg","https://about.gitlab.com/blog/why-your-code-review-process-is-broken-and-how-to-fix-it","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why your code review process is broken, and how to fix it\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Emily von Hoffmann\"}],\n        \"datePublished\": \"2017-07-25\",\n      }",{"title":4217,"description":4218,"authors":4223,"heroImage":4219,"date":4224,"body":4225,"category":811,"tags":4226},[2158],"2017-07-25","\nPeople in every field can relate to the feeling of carefully moving down your checklist, 
triple-checking your work, and confidently *sending that email*, or *posting that tweet*, or *merging those changes*, only to at some later interval experience unmistakable stomach-sinking at some surprise snafu. That’s why we identify areas with potential for human error and build in review cycles with hopefully explicit steps and goals — like code reviews! So what about when you follow all of those steps and you’re still rudely greeted by code full of bugs, or a flood of user complaints?\n\n\u003C!-- more -->\n\nIn other words, why exactly did your code review process “fail” to deliver what you designed it for? It’s not just overt technical errors we’re looking to avoid; our Discussion Product Manager [Victor Wu](/company/team/#victorwu416) told me that we can think of code review as being ineffective if it results in code being shipped that doesn’t meet business or product goals. In this case, poor code review contributes to the ultimate failure, and may lead the product to snowball over time, becoming harder to fix and add new features. Here are a few scenarios with some thoughts on what might have contributed to the code review breakdown.\n\n### Feature shipped with a lot of defects\n\nThis one is easy to identify, but maybe not always as easy to remediate. What broke in the process that allowed this to happen? It might have something to do with rushed or unrealistic deadlines handed to developers, which we heard in our [Global Developer Survey](https://page.gitlab.com/2016-developer-survey_2016-developer-survey.html) is a major reason code gets shipped before it’s ready. 
One option here might be to try a cross-functional team, or review the channels available for communication with teammates in a different function than your own — the key to [better deadline-setting](/blog/why-code-is-released-too-early/) is finding ways to develop empathy for other teams’ needs, and that won’t happen if you’re siloed.\n\nIt can be even trickier if the problem arises from within the culture of the dev team itself. There can be a power dynamic and intimidation factor inherent in the review process that could make a more junior reviewer, for example, not stick to their guns when their suggestions are insufficiently addressed. At GitLab, we follow best practices, largely based on the [thoughtbot code review guide](https://github.com/thoughtbot/guides/tree/master/code-review), that are designed to create an effective environment for code reviews.\n\n>There can be a power dynamic and intimidation factor inherent in the review process that could make a more junior reviewer, for example, not stick to their guns when their suggestions are insufficiently addressed\n\nThe guide contains truisms that could apply to any setting where one’s creation might be critiqued by a teammate, like `avoid using terms that could be seen as referring to personal traits`, and `if you don't understand a piece of code, say so. There's a good chance someone else would be confused by it as well` for the reviewer, and `don't take it personally. The review is of the code, not of you` and `be grateful for the reviewer's suggestions` for the reviewee. It’s important to have the right person reviewing, and for everyone to internalize the respect and balance between the reviewee and reviewer roles.\n\n### Feature shipped with poor usability or did not solve the underlying business problem\n\nWhat happened here is likely to do with the [dynamic between the business and engineering teams](/blog/your-engineers-need-to-understand-your-business-heres-why/). 
Engineers may feel disheartened by business managers who seem solely concerned with functionality. This disregard can be reciprocal, with engineers focusing on delivering quality work but unconcerned with the business and the end users.\n\nIt’s not uncommon for engineers to be excluded from business discussions, until requirements are [thrown over the wall](/blog/your-engineers-need-to-understand-your-business-heres-why/) at them — this lack of alignment creates inefficiencies that can have long-term consequences. Engineers may feel uneasy about the timeline or the product direction, or they may simply feel whatever’s being asked of them is a bad idea. If their organization doesn’t have a channel open for them to discuss their concerns, they might feel they have no choice but to go along with it. Ideally, dev teams today will be heavily involved in business discussions, and they’ll have the responsibilities to match.\n\n>Ideally, dev teams today will be heavily involved in business discussions, and they’ll have the responsibilities to match\n\n### Feature shipped BUT...\n\nIt might be the case that all seems well when a feature ships, but going forward it takes much more time to develop new features, and there are many brittle edge cases. Victor told me that in this case, it’s more likely that the architecture is simply inadequate, and not enough effort was made to clean up tech debt. This is not the opportunistic tradeoff of tech debt and time to market that many startups weigh; it’s when tech debt feels like it’s spiraled out of control. 
This might be the confluence of the poor dynamics we’ve discussed above, with engineers pressed for time, [burned out and working long hours](https://codewithoutrules.com/2017/06/21/why-company-want-long-hours/), and perhaps feeling unable to push back against business demands.\n\nOn his blog *Code Without Rules*, Itamar Turner-Trauring [explains several possible reasons](https://codewithoutrules.com/2017/06/21/why-company-want-long-hours/) why organizations might have unhappy developers unable to do their best work, and he offers some tips for how individual developers might be able to regain some control over their work lives.\n\nWhat are other scenarios you’ve experienced? Leave a comment and let us know.\n",[696,9],{"slug":4228,"featured":6,"template":699},"why-your-code-review-process-is-broken-and-how-to-fix-it","content:en-us:blog:why-your-code-review-process-is-broken-and-how-to-fix-it.yml","Why Your Code Review Process Is Broken And How To Fix It","en-us/blog/why-your-code-review-process-is-broken-and-how-to-fix-it.yml","en-us/blog/why-your-code-review-process-is-broken-and-how-to-fix-it",{"_path":4234,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4235,"content":4241,"config":4246,"_id":4248,"_type":13,"title":4249,"_source":15,"_file":4250,"_stem":4251,"_extension":18},"/en-us/blog/working-on-two-git-branches-at-the-same-time",{"title":4236,"description":4237,"ogTitle":4236,"ogDescription":4237,"noIndex":6,"ogImage":4238,"ogUrl":4239,"ogSiteName":685,"ogType":686,"canonicalUrls":4239,"schema":4240},"How to work on two Git branches at the same time","Watch the demo on how using the GitLab Web IDE and your local dev environment to work on two branches at once can help save time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678782/Blog/Hero%20Images/working-on-two-git-branches-at-the-same-time.jpg","https://about.gitlab.com/blog/working-on-two-git-branches-at-the-same-time","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to work on two Git branches at the same time\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-10-03\",\n      }",{"title":4236,"description":4237,"authors":4242,"heroImage":4238,"date":4243,"body":4244,"category":832,"tags":4245},[1176],"2018-10-03","\nI was recently using both my local development environment and the GitLab [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), and found a really nice workflow for working with two Git branches simultaneously.\n\n### The problem\n\nIn this scenario, you’re doing development work on one branch, in one part of your codebase, and then likely documenting your process in another place. I really don’t want all of this in one merge request, because I don’t want to delay shipping the development work if [the docs](https://docs.gitlab.com) aren’t done. I want to be able to get it live so that others can see it, give feedback on each individual component, and iterate on it. At the same time, I don’t want to delay too long on documenting the process, because I want the docs to be as accurate and reproducible as possible.\n\n### The fix\n\nWhile doing my development work in my local development environment, I created another merge request for the documentation using the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/), essentially working on two different Git branches at the same time, using two different editors.\n\nIn my quick example below, you can see a merge request to add Jenkins content to our [DevOps tools](/competition/) page. I’ve checked out this branch locally, and I have it open in my Atom editor. I’ve been doing some work by updating `features.yml`, as well as a Markdown file and a Haml file. All of these changes are related to one merge request. 
While I’m committing changes locally to the comparison page, I’m documenting each step in my Web IDE in a separate tab, to make sure my instructions are precise, helpful, and completed in real time.\n\n### Watch the demo\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uV3ycYnwhBc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can see what we've got planned for the Web IDE in 2019 in our post about [our product vision for DevOps Create](/blog/create-vision/).\n\nWhat are other ways the Web IDE has come in handy for you? Let us know by tweeting us [@gitlab](https://twitter.com/gitlab)!\n\nCover [photo](https://unsplash.com/photos/3y1zF4hIPCg) by [Hans-Peter Gauster](https://unsplash.com/photos/3y1zF4hIPCg) on Unsplash\n{: .note}\n",[974,834,1684,790,9],{"slug":4247,"featured":6,"template":699},"working-on-two-git-branches-at-the-same-time","content:en-us:blog:working-on-two-git-branches-at-the-same-time.yml","Working On Two Git Branches At The Same Time","en-us/blog/working-on-two-git-branches-at-the-same-time.yml","en-us/blog/working-on-two-git-branches-at-the-same-time",{"_path":4253,"_dir":4254,"_draft":6,"_partial":6,"_locale":7,"content":4255,"config":4261,"_id":4263,"_type":13,"title":4264,"_source":15,"_file":4265,"_stem":4266,"_extension":18},"/en-us/blog/external-url/overcome-ai-sprawl-with-a-value-stream-management-approach","external-url",{"title":4256,"description":4257,"heroImage":4258,"date":4259,"category":693,"tags":4260},"Overcome AI sprawl with a Value Stream Management approach","From The Source: Learn how an AI strategy based on Value Stream Management can stop AI sprawl and supply chain constraints and drive 
ROI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665000/Blog/Hero%20Images/display-the-source-article-overcome-ai-sprawl-image-0492-1800x945-fy25.png","2025-01-06",[1159,9,1074],{"featured":6,"externalUrl":4262},"https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/","content:en-us:blog:external-url:overcome-ai-sprawl-with-a-value-stream-management-approach.yml","Overcome Ai Sprawl With A Value Stream Management Approach","en-us/blog/external-url/overcome-ai-sprawl-with-a-value-stream-management-approach.yml","en-us/blog/external-url/overcome-ai-sprawl-with-a-value-stream-management-approach",{"_path":4268,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":4269,"content":4275,"config":4280,"_id":4282,"_type":13,"title":4283,"_source":15,"_file":4284,"_stem":4285,"_extension":18},"/en-us/blog/15-git-tips-improve-workflow",{"title":4270,"description":4271,"ogTitle":4270,"ogDescription":4271,"noIndex":6,"ogImage":4272,"ogUrl":4273,"ogSiteName":685,"ogType":686,"canonicalUrls":4273,"schema":4274},"15 Git tips to improve your workflow","Learn how to compare commits, delete stale branches, and write aliases to save you some time. 
It's time to dust off your command line and Git busy!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681222/Blog/Hero%20Images/git-15th-anniversary-cover.png","https://about.gitlab.com/blog/15-git-tips-improve-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"15 Git tips to improve your workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2020-04-07\",\n      }",{"title":4270,"description":4271,"authors":4276,"heroImage":4272,"date":4277,"body":4278,"category":1584,"tags":4279},[852],"2020-04-07","This year, [Git](https://git-scm.com/) celebrates its 15th anniversary, and\nwe’ve been excitedly posting some thoughts about its creation and impact —\nfrom sharing our experience at [Git Merge\n2020](/blog/git-merge-fifteen-year-git-party/), discussing [the\nproblem with Git flow](/blog/what-is-gitlab-flow/), or\nhighlighting the newest Git feature [Partial\nClone](/blog/partial-clone-for-massive-repositories/).\n\n\nWhether you’re just getting started with Git, or you know your way around a\ncommand line, it’s always nice to brush up on your skills, which is why\nwe’ve gathered 15 methods to improve your Git-based workflow.\n\n\n### 1. 
Git aliases\n\n\nOne of the most impactful ways to improve your daily workflow is to create\naliases for common commands to save you some time in the terminal.\n\n\nYou can use the following commands to create aliases for the most-used Git\ncommands, `checkout`, `commit` and `branch`.\n\n\n```\n\ngit config --global alias.co checkout\n\ngit config --global alias.ci commit\n\ngit config --global alias.br branch\n\n```\n\n\nInstead of typing `git checkout master`, you only need to type `git co\nmaster`.\n\n\nYou could also edit these commands or add more by modifying the\n`~/.gitconfig` file directly:\n\n\n```\n\n[alias]\n    co = checkout\n    ci = commit\n    br = branch\n```\n\n\n### 2. See the repository status in your terminal’s prompt\n\n\nIf you’d like to visualize the status of your repository, you can run\n`git-prompt.sh`\n\n(you can [download\nit](https://github.com/git/git/blob/master/contrib/completion/git-prompt.sh)\nand follow the\n\ninstructions to use it in your system). If you're using Linux\n\nand have installed Git with your package manager, it may already be\n\npresent on your system, likely under `/etc/bash_completion.d/`.\n\n\nYou can replace your standard shell prompt with something a bit more\nexciting:\n\n\n![Git shell\nprompt](https://about.gitlab.com/images/blogimages/git-tricks/git-shell-info.png){:\n.shadow}\n\n\n_Taken from oh-my-zsh's [themes\nwiki](https://github.com/robbyrussell/oh-my-zsh/wiki/Themes#kafeitu)._\n\n\n### 3. 
Compare commits from the command line\n\n\nA simple way to compare the differences between commits or versions of the\nsame file is to use the `git diff` command.\n\n\nIf you want to compare the same file between different commits, you run the\nfollowing:\n\n\n```\n\n$ git diff $start_commit..$end_commit -- path/to/file\n\n```\n\n\nIf you want to compare the changes between two commits:\n\n\n```\n\n$ git diff $start_commit..$end_commit\n\n```\n\n\nThese commands will open the diff view inside the terminal, but if you\nprefer to use a more visual tool to compare your diffs, you can use `git\ndifftool`. [Meld](https://meldmerge.org/) is a useful viewer/editor to\nvisually compare diffs.\n\n\nTo configure Meld:\n\n\n```\n\n$ git config --global diff.tool git-meld\n\n```\n\n\nTo start viewing the diffs:\n\n\n```\n\n$ git difftool $start_commit..$end_commit -- path/to/file\n\n# or\n\n$ git difftool $start_commit..$end_commit\n\n```\n\n\n### 4. Stashing uncommitted changes\n\n\nIf you’re ever working on a feature and need to do an emergency fix on the\nproject, you could run into a problem. You don’t want to commit an\nunfinished feature, and you also don’t want to lose current changes. The\nsolution is to temporarily remove these changes with the Git stash command:\n\n\n```\n\n$ git stash\n\n```\n\n\nThe git stash command hides changes, giving you a clean working directory\nand the ability to switch to a new branch to make updates, without having to\ncommit a meaningless snapshot in order to save the current state.\n\n\nOnce you’re done working on a fix and want to revisit your previous changes,\nyou can run:\n\n\n```\n\n$ git stash pop\n\n```\n\n\nAnd your changes will be recovered. 🎉\n\n\nIf you no longer need those changes and want to clear the stash stack, you\ncan do so with:\n\n\n```\n\n$ git stash drop\n\n```\n\n\n### 5. Pull frequently\n\n\nIf you’re using [GitLab Flow](/solutions/gitlab-flow/), then you’re working\n\non feature branches. 
Depending on how long your feature takes to implement,\nthere might be several changes made to the master branch. In order to avoid\nmajor conflicts, you should frequently pull the changes from the master\nbranch to your branch to resolve any conflicts as soon as possible and to\nmake merging your branch to master easier.\n\n\n### 6. Autocomplete commands\n\n\nUsing [completion\nscripts](https://github.com/git/git/tree/master/contrib/completion), you can\nquickly create the commands for `bash`, `tcsh` and `zsh`. If you want to\ntype `git pull`, you can type just the first letter with `git p` followed by\n\u003Ckbd>Tab\u003C/kbd> will show the following:\n\n\n```\n\npack-objects   -- create packed archive of objects\n\npack-redundant -- find redundant pack files\n\npack-refs      -- pack heads and tags for efficient repository access\n\nparse-remote   -- routines to help parsing remote repository access\nparameters\n\npatch-id       -- compute unique ID for a patch\n\nprune          -- prune all unreachable objects from the object database\n\nprune-packed   -- remove extra objects that are already in pack files\n\npull           -- fetch from and merge with another repository or local\nbranch\n\npush           -- update remote refs along with associated objects\n\n```\n\n\nTo show all available commands, type `git` in your terminal followed by\n\n\u003Ckbd>Tab\u003C/kbd>+ \u003Ckbd>Tab\u003C/kbd>.\n\n\n### 7. Set a global `.gitignore`\n\n\nIf you want to avoid committing files like `.DS_Store` or Vim `swp` files,\n\nyou can set up a global `.gitignore` file.\n\n\nCreate the file:\n\n\n```bash\n\ntouch ~/.gitignore\n\n```\n\n\nThen run:\n\n\n```bash\n\ngit config --global core.excludesFile ~/.gitignore\n\n```\n\n\nOr manually add the following to your `~/.gitconfig`:\n\n\n```ini\n\n[core]\n  excludesFile = ~/.gitignore\n```\n\nYou can create a list of the things you want Git to ignore. 
To learn more,\nvisit the [gitignore documentation](https://git-scm.com/docs/gitignore).\n\n\n### 8. Enable Git’s autosquash feature by default\n\n\nAutosquash makes it easier to squash commits during an interactive rebase.\nIt can be enabled for each rebase using `git rebase -i --autosquash`, but\nit's easier to turn it on by default.\n\n\n```bash\n\ngit config --global rebase.autosquash true\n\n```\n\n\nOr manually add the following to your `~/.gitconfig`:\n\n\n```ini\n\n[rebase]\n  autosquash = true\n```\n\n\n### 9. Delete local branches that have been removed from remote on\nfetch/pull\n\n\nYou likely have stale branches in your local repository that no longer exist\nin the remote one. To delete them in each fetch/pull, run:\n\n\n```bash\n\ngit config --global fetch.prune true\n\n```\n\n\nOr manually add the following to your `~/.gitconfig`:\n\n\n```ini\n\n[fetch]\n  prune = true\n```\n\n\n### 10. Use Git blame more efficiently\n\n\nGit blame is a handy way to discover who changed a line in a file. Depending\non what you want to show, you can pass different flags:\n\n\n```\n\n$ git blame -w  # ignores white space\n\n$ git blame -M  # ignores moving text\n\n$ git blame -C  # ignores moving text into other files\n\n```\n\n\n### 11. Add an alias to check out merge requests locally\n\n\nA [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/)\ncontains all the history from a repository, and the additional\n\ncommits added to the branch associated with the MR. 
You can check out a\npublic merge request locally even if the source project is a fork (even a\nprivate fork) of the target project.\n\n\nTo check out a merge request locally, add the following alias to your\n`~/.gitconfig`:\n\n\n```\n\n[alias]\n  mr = !sh -c 'git fetch $1 merge-requests/$2/head:mr-$1-$2 && git checkout mr-$1-$2' -\n```\n\n\nNow you can check out a merge request from any repository and any remote.\nFor example, to check out the merge request with ID 5 as shown in GitLab\n\nfrom the `upstream` remote, run:\n\n\n```\n\ngit mr upstream 5\n\n```\n\n\nThis will fetch the merge request into a local `mr-upstream-5` branch and\ncheck\n\nit out. In the above example, `upstream` is the remote that points to GitLab\n\nwhich you can find out by running `git remote -v`.\n\n\n### 12. An alias of `HEAD`\n\n\nBreaking news: `@` is the same as `HEAD`. Using it during a rebase is a\nlifesaver:\n\n\n```bash\n\ngit rebase -i @~2\n\n```\n\n\n### 13. Resetting files\n\n\nYou’re modifying your code when you suddenly realize that the changes you\nmade are not great, and you’d like to reset them. Rather than clicking undo\non everything you edited, you can reset your files to the HEAD of the\nbranch:\n\n\n```\n\n$ git reset --hard HEAD\n\n```\n\n\nOr if you want to reset a single file:\n\n\n```\n\n$ git checkout HEAD -- path/to/file\n\n```\n\n\nNow, if you already committed your changes, but still want to revert back,\nyou can use:\n\n\n```\n\n$ git reset --soft HEAD~1\n\n```\n\n\n### 14. The `git-open` plugin\n\n\nIf you’d like to quickly visit the website that hosts the repository you’re\non, you’ll need `git-open`.\n\n\n[Install it](https://github.com/paulirish/git-open#installation) and take it\nfor a spin by cloning a repository from\n\n[GitLab.com](https://gitlab.com/explore). 
From your terminal, navigate to\nthe\n\nrepository and run `git open` to be transferred to the project’s page on\n\nGitLab.com.\n\n\nThe plugin works by default for projects hosted on GitLab.com, but you can\nalso use it\n\nwith your own GitLab instances. In that case, set up the domain name with:\n\n\n```bash\n\ngit config gitopen.gitlab.domain git.example.com\n\n```\n\n\nYou can open different remotes and branches if they have been set up. You\ncan learn more by checking out the [examples\nsection](https://github.com/paulirish/git-open#examples).\n\n\n### 15. The `git-extras` plugin\n\n\nIf you want to elevate Git with more commands, try out the\n\n[`git-extras` plugin](https://github.com/tj/git-extras), which includes `git\ninfo` (show\n\ninformation about the repository) and `git effort` (number of commits per\nfile).\n\n\n## Learn more about Git\n\n\nWe’re excited to announce that [Brendan O’Leary](/company/team/#brendan),\nsenior developer evangelist, will create 15 videos to celebrate Git's\nanniversary over the next several months. He’ll focus on a variety of\ntopics, from rebasing and merging to cherry-picking and branching. Take a\nlook at the first video in the series. 
🍿\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/9oDNBuive-g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\nCover image by [Brooke\nLark](https://unsplash.com/@brookelark?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non\n[Unsplash](https://unsplash.com/s/photos/birthday?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n\n{: .note}\n",[1684,9,1385],{"slug":4281,"featured":6,"template":699},"15-git-tips-improve-workflow","content:en-us:blog:15-git-tips-improve-workflow.yml","15 Git Tips Improve Workflow","en-us/blog/15-git-tips-improve-workflow.yml","en-us/blog/15-git-tips-improve-workflow",21,[678,704,729,751,774,798,819,842,862],1758662352040]